#!/usr/bin/env bash
#
# End-to-end BERT-base pipeline:
#   1. import the saved Torch model to MHLO MLIR,
#   2. compile it with holmes-compile for the hggc backend,
#   3. benchmark the compiled module on CUDA,
#   4. run the accuracy check (acc.py).
#
# Usage: ./<script> <batch_size> <img_size> <threads_num>
#   batch_size  - batch dimension of the two i32 model inputs
#   img_size    - second dimension of the inputs (sequence length for BERT)
#   threads_num - thread count passed to the benchmark's --multi_thread

set -euo pipefail

die() { printf 'error: %s\n' "$*" >&2; exit 1; }

(( $# == 3 )) || die "usage: ${0##*/} <batch_size> <img_size> <threads_num>"

bs=$1
imgsize=$2
threads_num=$3

readonly saved_model_path="/mnt/workspace/datasets/bert/bert_base.pt"
readonly mhlo_mlir="bert_base.affine_libcall.mhlo.mlir"
readonly vmfb="bert_base.affine_libcall.hggc.vmfb"
readonly entry_function=forward
batch_size=${bs}
benchmark_out="bert_base_bs${bs}.affine_libcall.bm.json"
# Shape spec shared by both model inputs (e.g. token ids and attention mask).
input="${bs}x${imgsize}xi32"

echo "batch_size=${batch_size},benchmark_out=${benchmark_out}, input=${input}"

# 1. import: Torch saved model -> MHLO MLIR. The model takes two inputs of
# the same i32 shape, hence ${input} is passed twice.
#iree-import-torch ${saved_model_path}  -o ${mhlo_mlir} --input ${bs}x512xi32 ${bs}x512xi32
iree-import-torch "${saved_model_path}" -o "${mhlo_mlir}" \
  --input "${input}" "${input}" \
  || die "iree-import-torch failed"

# 2. compile MHLO -> VM bytecode module for the hggc backend (flags kept
# exactly as in the original pipeline configuration).
holmes-compile "${mhlo_mlir}" \
  --iree-input-type=mhlo \
  --iree-hal-target-backends=hggc \
  --mlir-disable-threading \
  --mlir-elide-elementsattrs-if-larger=10 \
  --iree-hal-cuda-llvm-target-arch=sm_80 \
  --iree-mlir-to-vm-bytecode-module \
  --holmes-dump-dispatch-info \
  --iree-flow-dump-dispatch-graph \
  --holmes-enable-mark-linalg-op-as-library-call \
  --holmes-enable-corert-conversion \
  --holmes-enable-batchnorm-fusion \
  --holmes-flow-form-dispatch-fragmentary-region \
  --holmes-enable-mhlo-layernorm-fusion=false \
  --holmes-enable-multi-matmul-fusion \
  --holmes-flow-demote-f32-to-f16 \
  --holmes-libcall-use-cutlass=true \
  -o "${vmfb}" \
  || die "holmes-compile failed"

# 3. run the benchmark; results are written as JSON to ${benchmark_out}.
# NOTE(review): --multi_thread uses a space while every other flag uses '=';
# kept as-is in case the tool requires this form — confirm against its CLI.
holmes-benchmark-module \
  --module_file="${vmfb}" \
  --device=cuda \
  --entry_function="${entry_function}" \
  --function_input="${input}" \
  --function_input="${input}" \
  --batch_size="${batch_size}" \
  --enable_multi_stream=true \
  --use_spin_wait=true \
  --benchmark_repetitions=1 \
  --benchmark_out="${benchmark_out}" \
  --benchmark_out_format=json \
  --multi_thread "${threads_num}" \
  || die "holmes-benchmark-module failed"

# 4. accuracy check against the benchmark output.
python ./acc.py
echo 'run end'