#!/bin/bash
# Generate an Ascend OM model from the BERT-Large ONNX model for a given batch size.
# Usage: bash generate_om.sh <batch_size>
# Intermediate artifacts are cached under ./model_cache/ and reused on re-runs.

bs=$1
# Quote the expansion so an empty/whitespace value is handled reliably (SC2086).
if [ -z "${bs}" ]; then
	echo "Usage: bash generate_om.sh <batch_size>"
	echo "[ERROR] Must Specify the batchsize of model."
	exit 1
fi

# Abort on the first failed command, unset variable, or failed pipeline stage.
set -euo pipefail

cache_dir=./model_cache/
# -p: create parents and do not fail if the directory already exists,
# replacing the manual "[ ! -d ] && mkdir" dance.
mkdir -p "${cache_dir}"


# step1: download original onnx model (cached after the first successful fetch)
echo -e "\ndownloading the onnx model..."
orig_onnx=./model_cache/bert_large.onnx
if [ -f "${orig_onnx}" ]; then
	echo "use cache: ${orig_onnx}"
else
	# Quote the URL: '?' is a glob character. On failure, remove the partial
	# file so a truncated download is not mistaken for a cache hit next run.
	if ! wget -O "${orig_onnx}" "https://zenodo.org/record/3733910/files/model.onnx?download=1"; then
		rm -f "${orig_onnx}"
		echo "[ERROR] failed to download the onnx model." >&2
		exit 1
	fi
	echo "download successful, model saved in ${orig_onnx}"
fi


# step2: simplify onnx model by shape
echo -e "\nsimplifying onnx model by batchsize=${bs}..."
simpified_onnx=./model_cache/bert_large_bs${bs}.onnx
if [ -f "${simpified_onnx}" ]; then
	echo "use cache: ${simpified_onnx}"
else
	# Pin all three inputs to a static (batch, 384) shape so the later
	# optimization/quantization stages operate on a fully static graph.
	python -m onnxsim "${orig_onnx}" "${simpified_onnx}" \
		--overwrite-input-shape "input_ids:${bs},384" "input_mask:${bs},384" "segment_ids:${bs},384"
	echo "simplify successful, model saved in ${simpified_onnx}"
fi


# step3: big kernel optimization
echo -e "\nbig kernel optimizing..."
optimized_onnx=./model_cache/bert_large_bs${bs}_opt.onnx
if [ -f "${optimized_onnx}" ]; then
	echo "use cache: ${optimized_onnx}"
else
	# fix_onnx.py applies the big-kernel graph rewrite; it ships with the
	# Bert_Base_Uncased_for_Pytorch sources expected in the working directory.
	python Bert_Base_Uncased_for_Pytorch/fix_onnx.py "${simpified_onnx}" "${optimized_onnx}"
	echo "optimize successful, model saved in ${optimized_onnx}"
fi


# step4: infer shape
echo -e "\ninfering shape..."
shaped_onnx=./model_cache/bert_large_bs${bs}_shape.onnx
if [ -f "${shaped_onnx}" ]; then
	echo "use cache: ${shaped_onnx}"
else
	# Propagate tensor shapes through the optimized graph; the quantizer
	# (step5) consumes this shape-annotated model.
	python scripts/infer_shape.py "${optimized_onnx}" "${shaped_onnx}"
	echo "infer shape successful, model saved in ${shaped_onnx}"
fi


# step5: quantize onnx model
echo -e "\nquantizing onnx model..."
quantized_onnx=./model_cache/bert_large_bs${bs}_quant.onnx
if [ -f "${quantized_onnx}" ]; then
	echo "use cache: ${quantized_onnx}"
else
	# Per-batch-size calibration config; deleted again once calibration is done.
	quant_config=./quant_bs${bs}.cfg
	python scripts/create_config.py "${shaped_onnx}" "${quant_config}"
	# Calibration data is generated once and reused across batch sizes.
	quant_data='./quant_data'
	if [ ! -d "${quant_data}" ]; then
		python scripts/generate_quantize_data.py ./data/bert_bin/ "${quant_data}"
	fi
	amct_onnx calibration \
		--model "${shaped_onnx}" \
		--save_path "${quantized_onnx}" \
		--input_shape "input_ids:${bs},384;input_mask:${bs},384;segment_ids:${bs},384" \
		--data_dir "quant_data/bs${bs}/ids;quant_data/bs${bs}/mask;quant_data/bs${bs}/seg" \
		--data_types "int64;int64;int64" \
		--calibration_config "${quant_config}"
	# amct writes "<save_path>_deploy_model.onnx"; rename it to the cache name
	# that step6 reads.
	mv "${quantized_onnx}_deploy_model.onnx" "${quantized_onnx}"
	rm "${quant_config}"
	echo "quantize successful, model saved in ${quantized_onnx}"
fi


# step6: convert onnx to om
echo -e "\nconverting onnx to om..."
output_om=./model_cache/bert_large_bs${bs}.om
if [ -f "${output_om}" ]; then
	echo "use cache: ${output_om}"
else
	# Point ATC at the pre-tuned AOE knowledge bank matching this batch size.
	export TUNE_BANK_PATH=./aoe_knowledges/bs${bs}
	# atc appends ".om" to --output itself, hence the %.om suffix strip.
	atc --framework 5 \
	    --model "${quantized_onnx}" \
	    --output "${output_om%.om}" \
	    --input_shape "input_ids:${bs},384;input_mask:${bs},384;segment_ids:${bs},384" \
	    --input_format ND \
	    --log error \
	    --soc_version Ascend310P3 \
	    --optypelist_for_implmode="Gelu" \
	    --op_select_implmode=high_performance
	echo "convert successful, model saved in ${output_om}"
fi

echo -e "\ndone. the om model is ${output_om}"
