<?xml version="1.0" encoding="UTF-8"?>
<!--Phoronix Test Suite v10.8.4-->
<PhoronixTestSuite>
  <SuiteInformation>
    <Title>eMAG Suite</Title>
    <Version>1.0.0</Version>
    <TestType>System</TestType>
    <Description>Test suite extracted from eMAG.</Description>
    <Maintainer></Maintainer>
  </SuiteInformation>
  <Execute>
    <Test>pts/numpy-1.2.1</Test>
  </Execute>
  <Execute>
    <Test>pts/onednn-1.6.1</Test>
    <Arguments>--rnn --batch=inputs/rnn/perf_rnn_training --cfg=u8s8f32 --engine=cpu</Arguments>
    <Description>Harness: Recurrent Neural Network Training - Data Type: u8s8f32 - Engine: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/onednn-1.6.1</Test>
    <Arguments>--rnn --batch=inputs/rnn/perf_rnn_training --cfg=f32 --engine=cpu</Arguments>
    <Description>Harness: Recurrent Neural Network Training - Data Type: f32 - Engine: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/onednn-1.6.1</Test>
    <Arguments>--rnn --batch=inputs/rnn/perf_rnn_inference_lb --cfg=f32 --engine=cpu</Arguments>
    <Description>Harness: Recurrent Neural Network Inference - Data Type: f32 - Engine: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/onednn-1.6.1</Test>
    <Arguments>--rnn --batch=inputs/rnn/perf_rnn_training --cfg=bf16bf16bf16 --engine=cpu</Arguments>
    <Description>Harness: Recurrent Neural Network Training - Data Type: bf16bf16bf16 - Engine: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/avifenc-1.0.0</Test>
    <Arguments>-s 0</Arguments>
    <Description>Encoder Speed: 0</Description>
  </Execute>
  <Execute>
    <Test>pts/onednn-1.6.1</Test>
    <Arguments>--rnn --batch=inputs/rnn/perf_rnn_inference_lb --cfg=bf16bf16bf16 --engine=cpu</Arguments>
    <Description>Harness: Recurrent Neural Network Inference - Data Type: bf16bf16bf16 - Engine: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/onednn-1.6.1</Test>
    <Arguments>--rnn --batch=inputs/rnn/perf_rnn_inference_lb --cfg=u8s8f32 --engine=cpu</Arguments>
    <Description>Harness: Recurrent Neural Network Inference - Data Type: u8s8f32 - Engine: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/build-eigen-1.1.0</Test>
    <Description>Time To Compile</Description>
  </Execute>
  <Execute>
    <Test>pts/rav1e-1.4.0</Test>
    <Arguments>-s 5 -l 60</Arguments>
    <Description>Speed: 5</Description>
  </Execute>
  <Execute>
    <Test>pts/avifenc-1.0.0</Test>
    <Arguments>-s 2</Arguments>
    <Description>Encoder Speed: 2</Description>
  </Execute>
  <Execute>
    <Test>pts/rav1e-1.4.0</Test>
    <Arguments>-s 6 -l 60</Arguments>
    <Description>Speed: 6</Description>
  </Execute>
  <Execute>
    <Test>pts/rav1e-1.4.0</Test>
    <Arguments>-s 1 -l 20</Arguments>
    <Description>Speed: 1</Description>
  </Execute>
  <Execute>
    <Test>pts/stockfish-1.2.0</Test>
    <Description>Total Time</Description>
  </Execute>
  <Execute>
    <Test>pts/asmfish-1.1.2</Test>
    <Description>1024 Hash Memory, 26 Depth</Description>
  </Execute>
  <Execute>
    <Test>pts/rav1e-1.4.0</Test>
    <Arguments>-s 10 -l 90</Arguments>
    <Description>Speed: 10</Description>
  </Execute>
  <Execute>
    <Test>pts/espeak-1.6.1</Test>
    <Description>Text-To-Speech Synthesis</Description>
  </Execute>
  <Execute>
    <Test>pts/encode-ape-1.4.0</Test>
    <Description>WAV To APE</Description>
  </Execute>
  <Execute>
    <Test>pts/onednn-1.6.1</Test>
    <Arguments>--deconv --batch=inputs/deconv/shapes_1d --cfg=f32 --engine=cpu</Arguments>
    <Description>Harness: Deconvolution Batch shapes_1d - Data Type: f32 - Engine: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/simdjson-1.1.1</Test>
    <Arguments>Kostya</Arguments>
    <Description>Throughput Test: Kostya</Description>
  </Execute>
  <Execute>
    <Test>pts/encode-opus-1.1.1</Test>
    <Description>WAV To Opus Encode</Description>
  </Execute>
  <Execute>
    <Test>pts/clomp-1.1.1</Test>
    <Description>Static OMP Speedup</Description>
  </Execute>
  <Execute>
    <Test>pts/simdjson-1.1.1</Test>
    <Arguments>LargeRandom</Arguments>
    <Description>Throughput Test: LargeRandom</Description>
  </Execute>
  <Execute>
    <Test>pts/onednn-1.6.1</Test>
    <Arguments>--ip --batch=inputs/ip/shapes_1d --cfg=f32 --engine=cpu</Arguments>
    <Description>Harness: IP Shapes 1D - Data Type: f32 - Engine: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/simdjson-1.1.1</Test>
    <Arguments>PartialTweets</Arguments>
    <Description>Throughput Test: PartialTweets</Description>
  </Execute>
  <Execute>
    <Test>pts/simdjson-1.1.1</Test>
    <Arguments>DistinctUserID</Arguments>
    <Description>Throughput Test: DistinctUserID</Description>
  </Execute>
  <Execute>
    <Test>pts/onednn-1.6.1</Test>
    <Arguments>--matmul --batch=inputs/matmul/shapes_transformer --cfg=u8s8f32 --engine=cpu</Arguments>
    <Description>Harness: Matrix Multiply Batch Shapes Transformer - Data Type: u8s8f32 - Engine: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/onednn-1.6.1</Test>
    <Arguments>--matmul --batch=inputs/matmul/shapes_transformer --cfg=f32 --engine=cpu</Arguments>
    <Description>Harness: Matrix Multiply Batch Shapes Transformer - Data Type: f32 - Engine: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/onednn-1.6.1</Test>
    <Arguments>--ip --batch=inputs/ip/shapes_3d --cfg=f32 --engine=cpu</Arguments>
    <Description>Harness: IP Shapes 3D - Data Type: f32 - Engine: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/mafft-1.6.2</Test>
    <Description>Multiple Sequence Alignment - LSU RNA</Description>
  </Execute>
  <Execute>
    <Test>pts/onednn-1.6.1</Test>
    <Arguments>--conv --batch=inputs/conv/shapes_auto --cfg=f32 --engine=cpu</Arguments>
    <Description>Harness: Convolution Batch Shapes Auto - Data Type: f32 - Engine: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/coremark-1.0.1</Test>
    <Description>CoreMark Size 666 - Iterations Per Second</Description>
  </Execute>
  <Execute>
    <Test>pts/avifenc-1.0.0</Test>
    <Arguments>-s 8</Arguments>
    <Description>Encoder Speed: 8</Description>
  </Execute>
  <Execute>
    <Test>pts/avifenc-1.0.0</Test>
    <Arguments>-s 10</Arguments>
    <Description>Encoder Speed: 10</Description>
  </Execute>
  <Execute>
    <Test>pts/onednn-1.6.1</Test>
    <Arguments>--deconv --batch=inputs/deconv/shapes_1d --cfg=u8s8f32 --engine=cpu</Arguments>
    <Description>Harness: Deconvolution Batch shapes_1d - Data Type: u8s8f32 - Engine: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/x264-2.6.1</Test>
    <Description>H.264 Video Encoding</Description>
  </Execute>
  <Execute>
    <Test>pts/onednn-1.6.1</Test>
    <Arguments>--ip --batch=inputs/ip/shapes_1d --cfg=u8s8f32 --engine=cpu</Arguments>
    <Description>Harness: IP Shapes 1D - Data Type: u8s8f32 - Engine: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/onednn-1.6.1</Test>
    <Arguments>--deconv --batch=inputs/deconv/shapes_3d --cfg=f32 --engine=cpu</Arguments>
    <Description>Harness: Deconvolution Batch shapes_3d - Data Type: f32 - Engine: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/onednn-1.6.1</Test>
    <Arguments>--ip --batch=inputs/ip/shapes_3d --cfg=u8s8f32 --engine=cpu</Arguments>
    <Description>Harness: IP Shapes 3D - Data Type: u8s8f32 - Engine: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/onednn-1.6.1</Test>
    <Arguments>--conv --batch=inputs/conv/shapes_auto --cfg=u8s8f32 --engine=cpu</Arguments>
    <Description>Harness: Convolution Batch Shapes Auto - Data Type: u8s8f32 - Engine: CPU</Description>
  </Execute>
  <Execute>
    <Test>pts/tscp-1.2.2</Test>
    <Description>AI Chess Performance</Description>
  </Execute>
  <Execute>
    <Test>pts/onednn-1.6.1</Test>
    <Arguments>--deconv --batch=inputs/deconv/shapes_3d --cfg=u8s8f32 --engine=cpu</Arguments>
    <Description>Harness: Deconvolution Batch shapes_3d - Data Type: u8s8f32 - Engine: CPU</Description>
  </Execute>
</PhoronixTestSuite>
