<?xml version="1.0" encoding="UTF-8"?>
<!--Phoronix Test Suite v10.8.4-->
<PhoronixTestSuite>
  <SuiteInformation>
    <Title>lg Suite</Title>
    <Version>1.0.0</Version>
    <TestType>System</TestType>
    <Description>Test suite extracted from lg.</Description>
    <!-- NOTE(review): Maintainer is a single space (blank); fill in a real maintainer before publishing -->
    <Maintainer> </Maintainer>
  </SuiteInformation>
  <!-- OpenVINO 1.4.0: model inference benchmarks, all on the CPU device -->
  <Execute>
    <Test>pts/openvino-1.4.0</Test>
    <Arguments>-m models/intel/age-gender-recognition-retail-0013/FP16-INT8/age-gender-recognition-retail-0013.xml -d CPU</Arguments>
    <Description>Model: Age Gender Recognition Retail 0013 FP16-INT8 - Device: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/openvino-1.4.0</Test>
    <Arguments>-m models/intel/handwritten-english-recognition-0001/FP16-INT8/handwritten-english-recognition-0001.xml -d CPU</Arguments>
    <Description>Model: Handwritten English Recognition FP16-INT8 - Device: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/openvino-1.4.0</Test>
    <Arguments>-m models/intel/age-gender-recognition-retail-0013/FP16/age-gender-recognition-retail-0013.xml -d CPU</Arguments>
    <Description>Model: Age Gender Recognition Retail 0013 FP16 - Device: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/openvino-1.4.0</Test>
    <Arguments>-m models/intel/handwritten-english-recognition-0001/FP16/handwritten-english-recognition-0001.xml -d CPU</Arguments>
    <Description>Model: Handwritten English Recognition FP16 - Device: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/openvino-1.4.0</Test>
    <Arguments>-m models/intel/person-vehicle-bike-detection-2004/FP16/person-vehicle-bike-detection-2004.xml -d CPU</Arguments>
    <Description>Model: Person Vehicle Bike Detection FP16 - Device: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/openvino-1.4.0</Test>
    <Arguments>-m models/intel/weld-porosity-detection-0001/FP16-INT8/weld-porosity-detection-0001.xml -d CPU</Arguments>
    <Description>Model: Weld Porosity Detection FP16-INT8 - Device: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/openvino-1.4.0</Test>
    <Arguments>-m models/intel/machine-translation-nar-en-de-0002/FP16/machine-translation-nar-en-de-0002.xml -d CPU</Arguments>
    <Description>Model: Machine Translation EN To DE FP16 - Device: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/openvino-1.4.0</Test>
    <Arguments>-m models/intel/road-segmentation-adas-0001/FP16-INT8/road-segmentation-adas-0001.xml -d CPU</Arguments>
    <Description>Model: Road Segmentation ADAS FP16-INT8 - Device: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/openvino-1.4.0</Test>
    <Arguments>-m models/intel/face-detection-retail-0005/FP16-INT8/face-detection-retail-0005.xml -d CPU</Arguments>
    <Description>Model: Face Detection Retail FP16-INT8 - Device: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/openvino-1.4.0</Test>
    <Arguments>-m models/intel/weld-porosity-detection-0001/FP16/weld-porosity-detection-0001.xml -d CPU</Arguments>
    <Description>Model: Weld Porosity Detection FP16 - Device: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/openvino-1.4.0</Test>
    <Arguments>-m models/intel/vehicle-detection-0202/FP16-INT8/vehicle-detection-0202.xml -d CPU</Arguments>
    <Description>Model: Vehicle Detection FP16-INT8 - Device: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/openvino-1.4.0</Test>
    <Arguments>-m models/intel/road-segmentation-adas-0001/FP16/road-segmentation-adas-0001.xml -d CPU</Arguments>
    <Description>Model: Road Segmentation ADAS FP16 - Device: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/openvino-1.4.0</Test>
    <Arguments>-m models/intel/face-detection-retail-0005/FP16/face-detection-retail-0005.xml -d CPU</Arguments>
    <Description>Model: Face Detection Retail FP16 - Device: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/openvino-1.4.0</Test>
    <Arguments>-m models/intel/face-detection-0206/FP16-INT8/face-detection-0206.xml -d CPU</Arguments>
    <Description>Model: Face Detection FP16-INT8 - Device: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/openvino-1.4.0</Test>
    <Arguments>-m models/intel/vehicle-detection-0202/FP16/vehicle-detection-0202.xml -d CPU</Arguments>
    <Description>Model: Vehicle Detection FP16 - Device: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/openvino-1.4.0</Test>
    <Arguments>-m models/intel/person-detection-0303/FP32/person-detection-0303.xml -d CPU</Arguments>
    <Description>Model: Person Detection FP32 - Device: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/openvino-1.4.0</Test>
    <Arguments>-m models/intel/person-detection-0303/FP16/person-detection-0303.xml -d CPU</Arguments>
    <Description>Model: Person Detection FP16 - Device: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/openvino-1.4.0</Test>
    <Arguments>-m models/intel/face-detection-0206/FP16/face-detection-0206.xml -d CPU</Arguments>
    <Description>Model: Face Detection FP16 - Device: CPU</Description>
  </Execute>
  <!-- DeepSparse 1.6.0: NLP/CV inference benchmarks, each model in sync (single-stream) and async (multi-stream) scenarios -->
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:nlp/token_classification/bert-base/pytorch/huggingface/conll2003/base-none --scenario sync</Arguments>
    <Description>Model: NLP Token Classification, BERT base uncased conll2003 - Scenario: Synchronous Single-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:nlp/token_classification/bert-base/pytorch/huggingface/conll2003/base-none --scenario async</Arguments>
    <Description>Model: NLP Token Classification, BERT base uncased conll2003 - Scenario: Asynchronous Multi-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97_quant-none --input_shapes='[1,128]' --scenario sync</Arguments>
    <Description>Model: BERT-Large, NLP Question Answering, Sparse INT8 - Scenario: Synchronous Single-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97_quant-none --input_shapes='[1,128]' --scenario async</Arguments>
    <Description>Model: BERT-Large, NLP Question Answering, Sparse INT8 - Scenario: Asynchronous Multi-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:cv/segmentation/yolact-darknet53/pytorch/dbolya/coco/pruned90-none --scenario sync</Arguments>
    <Description>Model: CV Segmentation, 90% Pruned YOLACT Pruned - Scenario: Synchronous Single-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:cv/segmentation/yolact-darknet53/pytorch/dbolya/coco/pruned90-none --scenario async</Arguments>
    <Description>Model: CV Segmentation, 90% Pruned YOLACT Pruned - Scenario: Asynchronous Multi-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:nlp/text_classification/distilbert-none/pytorch/huggingface/mnli/base-none --scenario sync</Arguments>
    <Description>Model: NLP Text Classification, DistilBERT mnli - Scenario: Synchronous Single-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:nlp/text_classification/distilbert-none/pytorch/huggingface/mnli/base-none --scenario async</Arguments>
    <Description>Model: NLP Text Classification, DistilBERT mnli - Scenario: Asynchronous Multi-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/pruned85-none --scenario sync</Arguments>
    <Description>Model: CV Detection, YOLOv5s COCO, Sparse INT8 - Scenario: Synchronous Single-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/pruned85-none --scenario async</Arguments>
    <Description>Model: CV Detection, YOLOv5s COCO, Sparse INT8 - Scenario: Asynchronous Multi-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/base-none --scenario sync</Arguments>
    <Description>Model: CV Classification, ResNet-50 ImageNet - Scenario: Synchronous Single-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/base-none --scenario async</Arguments>
    <Description>Model: CV Classification, ResNet-50 ImageNet - Scenario: Asynchronous Multi-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/base-none --input_shapes='[1,128]' --scenario sync</Arguments>
    <Description>Model: BERT-Large, NLP Question Answering - Scenario: Synchronous Single-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/base-none --input_shapes='[1,128]' --scenario async</Arguments>
    <Description>Model: BERT-Large, NLP Question Answering - Scenario: Asynchronous Multi-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/base-none --scenario sync</Arguments>
    <Description>Model: CV Detection, YOLOv5s COCO - Scenario: Synchronous Single-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/base-none --scenario async</Arguments>
    <Description>Model: CV Detection, YOLOv5s COCO - Scenario: Asynchronous Multi-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned95_uniform_quant-none --scenario sync</Arguments>
    <Description>Model: ResNet-50, Sparse INT8 - Scenario: Synchronous Single-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned95_uniform_quant-none --scenario async</Arguments>
    <Description>Model: ResNet-50, Sparse INT8 - Scenario: Asynchronous Multi-Stream</Description>
  </Execute>
  <!-- NOTE(review): the two "ResNet-50, Baseline" entries below repeat the exact Arguments of the
       "CV Classification, ResNet-50 ImageNet" entries above under a different Description — the same
       benchmark will run twice. Presumably an artifact of the suite extraction; confirm before removing. -->
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/base-none --scenario sync</Arguments>
    <Description>Model: ResNet-50, Baseline - Scenario: Synchronous Single-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/base-none --scenario async</Arguments>
    <Description>Model: ResNet-50, Baseline - Scenario: Asynchronous Multi-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:nlp/sentiment_analysis/oberta-base/pytorch/huggingface/sst2/pruned90_quant-none --input_shapes='[1,128]' --scenario sync</Arguments>
    <Description>Model: NLP Text Classification, BERT base uncased SST2, Sparse INT8 - Scenario: Synchronous Single-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:nlp/sentiment_analysis/oberta-base/pytorch/huggingface/sst2/pruned90_quant-none --input_shapes='[1,128]' --scenario async</Arguments>
    <Description>Model: NLP Text Classification, BERT base uncased SST2, Sparse INT8 - Scenario: Asynchronous Multi-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:nlp/document_classification/obert-base/pytorch/huggingface/imdb/base-none --scenario sync</Arguments>
    <Description>Model: NLP Document Classification, oBERT base uncased on IMDB - Scenario: Synchronous Single-Stream</Description>
  </Execute>
  <Execute>
    <Test>pts/deepsparse-1.6.0</Test>
    <Arguments>zoo:nlp/document_classification/obert-base/pytorch/huggingface/imdb/base-none --scenario async</Arguments>
    <Description>Model: NLP Document Classification, oBERT base uncased on IMDB - Scenario: Asynchronous Multi-Stream</Description>
  </Execute>
  <!-- OpenSSL 1.2.0: public-key and digest throughput (EVP cipher entries appear further below) -->
  <Execute>
    <Test>system/openssl-1.2.0</Test>
    <Arguments>rsa4096</Arguments>
    <Description>Algorithm: RSA4096</Description>
  </Execute>
  <Execute>
    <Test>system/openssl-1.2.0</Test>
    <Arguments>sha512</Arguments>
    <Description>Algorithm: SHA512</Description>
  </Execute>
  <Execute>
    <Test>system/openssl-1.2.0</Test>
    <Arguments>sha256</Arguments>
    <Description>Algorithm: SHA256</Description>
  </Execute>
  <!-- OSPRay Studio 1.2.0: CPU path-tracer rendering across camera (1-3), resolution (1080p/4K), and SPP (1/32) combinations -->
  <Execute>
    <Test>pts/ospray-studio-1.2.0</Test>
    <Arguments>--cameras 3 3 --resolution 1920 1080 --spp 32 --renderer pathtracer</Arguments>
    <Description>Camera: 3 - Resolution: 1080p - Samples Per Pixel: 32 - Renderer: Path Tracer - Acceleration: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/ospray-studio-1.2.0</Test>
    <Arguments>--cameras 2 2 --resolution 1920 1080 --spp 32 --renderer pathtracer</Arguments>
    <Description>Camera: 2 - Resolution: 1080p - Samples Per Pixel: 32 - Renderer: Path Tracer - Acceleration: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/ospray-studio-1.2.0</Test>
    <Arguments>--cameras 1 1 --resolution 1920 1080 --spp 32 --renderer pathtracer</Arguments>
    <Description>Camera: 1 - Resolution: 1080p - Samples Per Pixel: 32 - Renderer: Path Tracer - Acceleration: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/ospray-studio-1.2.0</Test>
    <Arguments>--cameras 3 3 --resolution 1920 1080 --spp 1 --renderer pathtracer</Arguments>
    <Description>Camera: 3 - Resolution: 1080p - Samples Per Pixel: 1 - Renderer: Path Tracer - Acceleration: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/ospray-studio-1.2.0</Test>
    <Arguments>--cameras 2 2 --resolution 1920 1080 --spp 1 --renderer pathtracer</Arguments>
    <Description>Camera: 2 - Resolution: 1080p - Samples Per Pixel: 1 - Renderer: Path Tracer - Acceleration: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/ospray-studio-1.2.0</Test>
    <Arguments>--cameras 1 1 --resolution 1920 1080 --spp 1 --renderer pathtracer</Arguments>
    <Description>Camera: 1 - Resolution: 1080p - Samples Per Pixel: 1 - Renderer: Path Tracer - Acceleration: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/ospray-studio-1.2.0</Test>
    <Arguments>--cameras 3 3 --resolution 3840 2160 --spp 32 --renderer pathtracer</Arguments>
    <Description>Camera: 3 - Resolution: 4K - Samples Per Pixel: 32 - Renderer: Path Tracer - Acceleration: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/ospray-studio-1.2.0</Test>
    <Arguments>--cameras 2 2 --resolution 3840 2160 --spp 32 --renderer pathtracer</Arguments>
    <Description>Camera: 2 - Resolution: 4K - Samples Per Pixel: 32 - Renderer: Path Tracer - Acceleration: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/ospray-studio-1.2.0</Test>
    <Arguments>--cameras 1 1 --resolution 3840 2160 --spp 32 --renderer pathtracer</Arguments>
    <Description>Camera: 1 - Resolution: 4K - Samples Per Pixel: 32 - Renderer: Path Tracer - Acceleration: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/ospray-studio-1.2.0</Test>
    <Arguments>--cameras 3 3 --resolution 3840 2160 --spp 1 --renderer pathtracer</Arguments>
    <Description>Camera: 3 - Resolution: 4K - Samples Per Pixel: 1 - Renderer: Path Tracer - Acceleration: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/ospray-studio-1.2.0</Test>
    <Arguments>--cameras 2 2 --resolution 3840 2160 --spp 1 --renderer pathtracer</Arguments>
    <Description>Camera: 2 - Resolution: 4K - Samples Per Pixel: 1 - Renderer: Path Tracer - Acceleration: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/ospray-studio-1.2.0</Test>
    <Arguments>--cameras 1 1 --resolution 3840 2160 --spp 1 --renderer pathtracer</Arguments>
    <Description>Camera: 1 - Resolution: 4K - Samples Per Pixel: 1 - Renderer: Path Tracer - Acceleration: CPU</Description>
  </Execute>
  <!-- Compilation timing tests -->
  <Execute>
    <Test>pts/build-gem5-1.1.0</Test>
    <Description>Time To Compile</Description>
  </Execute>
  <Execute>
    <Test>pts/build-ffmpeg-6.1.0</Test>
    <Description>Time To Compile</Description>
  </Execute>
  <!-- SVT-AV1 2.11.1: AV1 encoding at presets 4/8/12/13, 1080p and 4K inputs -->
  <Execute>
    <Test>pts/svt-av1-2.11.1</Test>
    <Arguments>--preset 13 -i Bosphorus_1920x1080_120fps_420_8bit_YUV.yuv -w 1920 -h 1080</Arguments>
    <Description>Encoder Mode: Preset 13 - Input: Bosphorus 1080p</Description>
  </Execute>
  <Execute>
    <Test>pts/svt-av1-2.11.1</Test>
    <Arguments>--preset 12 -i Bosphorus_1920x1080_120fps_420_8bit_YUV.yuv -w 1920 -h 1080</Arguments>
    <Description>Encoder Mode: Preset 12 - Input: Bosphorus 1080p</Description>
  </Execute>
  <Execute>
    <Test>pts/svt-av1-2.11.1</Test>
    <Arguments>--preset 8 -i Bosphorus_1920x1080_120fps_420_8bit_YUV.yuv -w 1920 -h 1080</Arguments>
    <Description>Encoder Mode: Preset 8 - Input: Bosphorus 1080p</Description>
  </Execute>
  <Execute>
    <Test>pts/svt-av1-2.11.1</Test>
    <Arguments>--preset 4 -n 160 -i Bosphorus_1920x1080_120fps_420_8bit_YUV.yuv -w 1920 -h 1080</Arguments>
    <Description>Encoder Mode: Preset 4 - Input: Bosphorus 1080p</Description>
  </Execute>
  <Execute>
    <Test>pts/svt-av1-2.11.1</Test>
    <Arguments>--preset 13 -i Bosphorus_3840x2160.y4m -w 3840 -h 2160</Arguments>
    <Description>Encoder Mode: Preset 13 - Input: Bosphorus 4K</Description>
  </Execute>
  <Execute>
    <Test>pts/svt-av1-2.11.1</Test>
    <Arguments>--preset 12 -i Bosphorus_3840x2160.y4m -w 3840 -h 2160</Arguments>
    <Description>Encoder Mode: Preset 12 - Input: Bosphorus 4K</Description>
  </Execute>
  <Execute>
    <Test>pts/svt-av1-2.11.1</Test>
    <Arguments>--preset 8 -i Bosphorus_3840x2160.y4m -w 3840 -h 2160</Arguments>
    <Description>Encoder Mode: Preset 8 - Input: Bosphorus 4K</Description>
  </Execute>
  <Execute>
    <Test>pts/svt-av1-2.11.1</Test>
    <Arguments>--preset 4 -n 160 -i Bosphorus_3840x2160.y4m -w 3840 -h 2160</Arguments>
    <Description>Encoder Mode: Preset 4 - Input: Bosphorus 4K</Description>
  </Execute>
  <!-- Embree 1.6.1: ray-tracing kernel benchmarks -->
  <Execute>
    <Test>pts/embree-1.6.1</Test>
    <Arguments>pathtracer_ispc -c asian_dragon/asian_dragon.ecs</Arguments>
    <Description>Binary: Pathtracer ISPC - Model: Asian Dragon</Description>
  </Execute>
  <Execute>
    <Test>pts/embree-1.6.1</Test>
    <Arguments>pathtracer -c asian_dragon/asian_dragon.ecs</Arguments>
    <Description>Binary: Pathtracer - Model: Asian Dragon</Description>
  </Execute>
  <Execute>
    <Test>pts/embree-1.6.1</Test>
    <Arguments>pathtracer_ispc -c crown/crown.ecs</Arguments>
    <Description>Binary: Pathtracer ISPC - Model: Crown</Description>
  </Execute>
  <Execute>
    <Test>pts/embree-1.6.1</Test>
    <Arguments>pathtracer -c crown/crown.ecs</Arguments>
    <Description>Binary: Pathtracer - Model: Crown</Description>
  </Execute>
  <!-- WebP2 1.2.1: image encoding -->
  <Execute>
    <Test>pts/webp2-1.2.1</Test>
    <Arguments>-q 95 -effort 7</Arguments>
    <Description>Encode Settings: Quality 95, Compression Effort 7</Description>
  </Execute>
  <Execute>
    <Test>pts/webp2-1.2.1</Test>
    <Arguments>-q 75 -effort 7</Arguments>
    <Description>Encode Settings: Quality 75, Compression Effort 7</Description>
  </Execute>
  <Execute>
    <Test>pts/webp2-1.2.1</Test>
    <Description>Encode Settings: Default</Description>
  </Execute>
  <!-- DaCapo 1.1.0: JVM workload suite (one additional cassandra entry appears at the end of this file) -->
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>h2o</Arguments>
    <Description>Java Test: H2O In-Memory Platform For Machine Learning</Description>
  </Execute>
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>zxing</Arguments>
    <Description>Java Test: Zxing 1D/2D Barcode Image Processing</Description>
  </Execute>
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>biojava</Arguments>
    <Description>Java Test: BioJava Biological Data Framework</Description>
  </Execute>
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>avrora</Arguments>
    <Description>Java Test: Avrora AVR Simulation Framework</Description>
  </Execute>
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>lusearch</Arguments>
    <Description>Java Test: Apache Lucene Search Engine</Description>
  </Execute>
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>luindex</Arguments>
    <Description>Java Test: Apache Lucene Search Index</Description>
  </Execute>
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>pmd</Arguments>
    <Description>Java Test: PMD Source Code Analyzer</Description>
  </Execute>
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>fop</Arguments>
    <Description>Java Test: FOP Print Formatter</Description>
  </Execute>
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>h2</Arguments>
    <Description>Java Test: H2 Database Engine</Description>
  </Execute>
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>batik</Arguments>
    <Description>Java Test: Batik SVG Toolkit</Description>
  </Execute>
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>xalan</Arguments>
    <Description>Java Test: Apache Xalan XSLT</Description>
  </Execute>
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>jme</Arguments>
    <Description>Java Test: jMonkeyEngine</Description>
  </Execute>
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>tomcat</Arguments>
    <Description>Java Test: Apache Tomcat</Description>
  </Execute>
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>kafka</Arguments>
    <Description>Java Test: Apache Kafka</Description>
  </Execute>
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>spring</Arguments>
    <Description>Java Test: Spring Boot</Description>
  </Execute>
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>tradebeans</Arguments>
    <Description>Java Test: Tradebeans</Description>
  </Execute>
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>tradesoap</Arguments>
    <Description>Java Test: Tradesoap</Description>
  </Execute>
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>graphchi</Arguments>
    <Description>Java Test: GraphChi</Description>
  </Execute>
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>eclipse</Arguments>
    <Description>Java Test: Eclipse</Description>
  </Execute>
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>jython</Arguments>
    <Description>Java Test: Jython</Description>
  </Execute>
  <!-- XMRig 1.2.0: CPU mining hash-rate benchmarks, 1M hash count each -->
  <Execute>
    <Test>pts/xmrig-1.2.0</Test>
    <Arguments>-a cn/upx2 --bench=1M</Arguments>
    <Description>Variant: CryptoNight-Femto UPX2 - Hash Count: 1M</Description>
  </Execute>
  <Execute>
    <Test>pts/xmrig-1.2.0</Test>
    <Arguments>-a cn-heavy/0 --bench=1M</Arguments>
    <Description>Variant: CryptoNight-Heavy - Hash Count: 1M</Description>
  </Execute>
  <Execute>
    <Test>pts/xmrig-1.2.0</Test>
    <Arguments>-a gr --bench=1M</Arguments>
    <Description>Variant: GhostRider - Hash Count: 1M</Description>
  </Execute>
  <Execute>
    <Test>pts/xmrig-1.2.0</Test>
    <Arguments>-a rx/wow --bench=1M</Arguments>
    <Description>Variant: Wownero - Hash Count: 1M</Description>
  </Execute>
  <Execute>
    <Test>pts/xmrig-1.2.0</Test>
    <Arguments>--bench=1M</Arguments>
    <Description>Variant: Monero - Hash Count: 1M</Description>
  </Execute>
  <Execute>
    <Test>pts/xmrig-1.2.0</Test>
    <Arguments>-a kawpow --bench=1M</Arguments>
    <Description>Variant: KawPow - Hash Count: 1M</Description>
  </Execute>
  <!-- FFmpeg 6.1.0: x264/x265 encoding across VOD, platform, upload, and live scenarios -->
  <Execute>
    <Test>pts/ffmpeg-6.1.0</Test>
    <Arguments>--encoder=libx265 vod</Arguments>
    <Description>Encoder: libx265 - Scenario: Video On Demand</Description>
  </Execute>
  <Execute>
    <Test>pts/ffmpeg-6.1.0</Test>
    <Arguments>--encoder=libx264 vod</Arguments>
    <Description>Encoder: libx264 - Scenario: Video On Demand</Description>
  </Execute>
  <Execute>
    <Test>pts/ffmpeg-6.1.0</Test>
    <Arguments>--encoder=libx265 platform</Arguments>
    <Description>Encoder: libx265 - Scenario: Platform</Description>
  </Execute>
  <Execute>
    <Test>pts/ffmpeg-6.1.0</Test>
    <Arguments>--encoder=libx264 platform</Arguments>
    <Description>Encoder: libx264 - Scenario: Platform</Description>
  </Execute>
  <Execute>
    <Test>pts/ffmpeg-6.1.0</Test>
    <Arguments>--encoder=libx265 upload</Arguments>
    <Description>Encoder: libx265 - Scenario: Upload</Description>
  </Execute>
  <Execute>
    <Test>pts/ffmpeg-6.1.0</Test>
    <Arguments>--encoder=libx264 upload</Arguments>
    <Description>Encoder: libx264 - Scenario: Upload</Description>
  </Execute>
  <Execute>
    <Test>pts/ffmpeg-6.1.0</Test>
    <Arguments>--encoder=libx265 live</Arguments>
    <Description>Encoder: libx265 - Scenario: Live</Description>
  </Execute>
  <Execute>
    <Test>pts/ffmpeg-6.1.0</Test>
    <Arguments>--encoder=libx264 live</Arguments>
    <Description>Encoder: libx264 - Scenario: Live</Description>
  </Execute>
  <!-- LCZero 1.7.0: neural-network chess engine backends -->
  <Execute>
    <Test>pts/lczero-1.7.0</Test>
    <Arguments>-b eigen</Arguments>
    <Description>Backend: Eigen</Description>
  </Execute>
  <Execute>
    <Test>pts/lczero-1.7.0</Test>
    <Arguments>-b blas</Arguments>
    <Description>Backend: BLAS</Description>
  </Execute>
  <!-- QuantLib 1.2.0: quantitative finance benchmarks -->
  <Execute>
    <Test>pts/quantlib-1.2.0</Test>
    <Description>Configuration: Single-Threaded</Description>
  </Execute>
  <Execute>
    <Test>pts/quantlib-1.2.0</Test>
    <Arguments>--mp</Arguments>
    <Description>Configuration: Multi-Threaded</Description>
  </Execute>
  <!-- OpenSSL 1.2.0: EVP cipher throughput -->
  <Execute>
    <Test>system/openssl-1.2.0</Test>
    <Arguments>-evp chacha20-poly1305</Arguments>
    <Description>Algorithm: ChaCha20-Poly1305</Description>
  </Execute>
  <Execute>
    <Test>system/openssl-1.2.0</Test>
    <Arguments>-evp aes-256-gcm</Arguments>
    <Description>Algorithm: AES-256-GCM</Description>
  </Execute>
  <Execute>
    <Test>system/openssl-1.2.0</Test>
    <Arguments>-evp aes-128-gcm</Arguments>
    <Description>Algorithm: AES-128-GCM</Description>
  </Execute>
  <Execute>
    <Test>system/openssl-1.2.0</Test>
    <Arguments>-evp chacha20</Arguments>
    <Description>Algorithm: ChaCha20</Description>
  </Execute>
  <!-- NOTE(review): this DaCapo entry sits apart from the dacapobench group above — execution order follows
       document order, so this is presumably deliberate from the extraction; confirm before regrouping -->
  <Execute>
    <Test>pts/dacapobench-1.1.0</Test>
    <Arguments>cassandra</Arguments>
    <Description>Java Test: Apache Cassandra</Description>
  </Execute>
</PhoronixTestSuite>
