# Models (ONNX file basenames, without the .onnx suffix) exercised by
# model_convert / model_benchmark below. Each entry must match the name of a
# per-model setter function defined later in this file.
run_model_list=(
    "b4"
    "bart_decoder_fixed"
    "bart_encoder_fixed"
    "ocrDet_960x960_op11"
    "RepVGG-A1"
    "title_bert"
    # "audio"
    "vatmm"
)

# Global: colon-separated input shapes (one shape per model input) for the
# currently-selected model; populated by the per-model setter functions.
ms_inputShapes=""

# Setter for model "b4".
b4() {
    ms_inputShapes="1,224,224,3"
}

# Setter for model "bart_decoder_fixed" (four inputs, colon-separated).
bart_decoder_fixed() {
    ms_inputShapes="1,32:1,32:1,128,768:1,128"
}

# Setter for model "bart_encoder_fixed" (two inputs, colon-separated).
bart_encoder_fixed() {
    ms_inputShapes="1,128:1,128"
}

# Setter for model "ocrDet_960x960_op11" (single input).
ocrDet_960x960_op11() {
    ms_inputShapes="1,960,960,3"
}

# Setter for model "RepVGG-A1" (single input).
# NOTE(review): a hyphen in a function name is a bash extension and would be
# rejected by POSIX sh; the name must stay as-is because run_model_list and
# the benchmark loop invoke the setter by model name.
RepVGG-A1() {
    ms_inputShapes="1,224,224,3"
}

# Setter for model "title_bert" (two inputs, colon-separated).
title_bert() {
    ms_inputShapes="1,256:1,256"
}

# Setter for model "audio" (single input). Currently commented out of
# run_model_list but kept so the model can be re-enabled easily.
audio() {
    ms_inputShapes="1,96,64,1"
}

# Setter for model "vatmm" (three inputs, colon-separated).
vatmm() {
    ms_inputShapes="10,224,224,3:1,51,128:1,1,768"
}

#######################################
# Convert every ONNX model in run_model_list to MindSpore Lite format.
# Globals:   run_model_list (read), LD_LIBRARY_PATH (written/exported)
# Arguments: $1 - path to the unpacked mindspore-lite package
#                 (must contain tools/converter)
# Outputs:   writes ./<model>.onnx.ms next to each ./<model>.onnx
#######################################
model_convert() {
    local mindspore_package_path=$1
    # Single slashes (original had '//tools/converter//'); hoisted out of the
    # loop since the package path does not change per model.
    local converter_dir="${mindspore_package_path}/tools/converter"

    # converter_lite needs its bundled libraries on the loader path.
    export LD_LIBRARY_PATH="${converter_dir}/lib/"

    local cur_run_model
    for cur_run_model in "${run_model_list[@]}"; do
        echo "$1"
        echo "cur run model : ${cur_run_model}.onnx, thread num = 1, 2, 4, 8"
        # mo --input_model ./${cur_run_model}.onnx

        "${converter_dir}/converter/converter_lite" --fmk=ONNX \
            --modelFile="./${cur_run_model}.onnx" \
            --outputFile="./${cur_run_model}.onnx.ms"
    done
}
# model_convert "./convert_new_mindspore-lite-1.8.0-linux-x64"


#######################################
# Benchmark every model in run_model_list on CPU at 1/2/4/8 threads with both
# OpenVINO benchmark_app (on the .xml model) and the MindSpore Lite benchmark
# tool (on the .onnx.ms model), printing only the average-latency lines.
# Globals:   run_model_list (read); ms_inputShapes (set by each model's
#            setter function — currently only echoed, the --inputShapes runs
#            were disabled); LD_LIBRARY_PATH (written/exported)
# Arguments: $1 - path to the unpacked mindspore-lite package
#                 (must contain tools/benchmark)
# Outputs:   benchmark result lines on stdout
#######################################
model_benchmark() {
    local mindspore_package_path=$1
    local benchmark_bin="${mindspore_package_path}/tools/benchmark/benchmark"

    # Hoisted out of the loop: make the bundled benchmark binary executable
    # (original used 'chmod +777', a GNU-only numeric-with-operator spelling;
    # '+x' is portable and grants only what is needed) and put its runtime
    # libraries on the loader path.
    chmod +x "${benchmark_bin}"
    export LD_LIBRARY_PATH="${mindspore_package_path}/runtime/lib/:${mindspore_package_path}/runtime/third_party/glog/"

    local cur_run_model nthreads
    for cur_run_model in "${run_model_list[@]}"; do
        # Call the per-model setter directly (no eval needed) to refresh
        # ms_inputShapes for the echo below.
        "${cur_run_model}"
        echo "$1"
        echo "cur run model : ${cur_run_model}.onnx, input shape = ${ms_inputShapes}, thread num = 1, 2, 4, 8"

        # OpenVINO runs, bf16 disabled; keep only the AVG latency lines.
        for nthreads in 1 2 4 8; do
            benchmark_app -m "./${cur_run_model}.xml" -d CPU -nstreams 1 \
                -niter 20 -nthreads "${nthreads}" -enforcebf16 False 2>&1 \
                | grep "AVG:"
        done

        # MindSpore Lite runs, FP32, cores bound (cpuBindMode=1).
        for nthreads in 1 2 4 8; do
            "${benchmark_bin}" --modelFile="./${cur_run_model}.onnx.ms" \
                --loopCount=10 --timeProfiling=false --device=CPU \
                --enableFp16=false --cpuBindMode=1 \
                --numThreads="${nthreads}" 2>&1 | grep "AvgRunTime = "
        done

        echo ""
    done
}
# Entry point: benchmark against the bind_cpu2 1.8.0 package. The commented
# calls below are alternative package builds used in earlier comparison runs.
model_benchmark "./bind_cpu2_mindspore-lite-1.8.0-linux-x64/"
# model_benchmark "./avx_mindspore-lite-1.8.0.20220627-linux-x64/"
# model_benchmark "./avx512_mindspore-lite-1.8.0.20220627-linux-x64/"
# model_benchmark "./server_mindspore-lite-1.8.0.20220627-linux-x64/"
# model_benchmark "./server_x86/"
