#!/bin/bash

# Parent of the current working directory; do not modify.
cur_path=$(pwd)/../

# Collective-communication parameters; do not modify.
export RANK_SIZE=1
export JOB_ID=10087
RANK_ID_START=0


# Training dataset path; keep empty here (filled from --data_path); do not modify.
data_path=''
# Path to the pre-trained checkpoint.
ckpt_path=''
# 2. Evaluation dataset path; skip if not applicable.
eval_data_path=''
# Default log level; no need to change.
#export ASCEND_GLOBAL_LOG_LEVEL=3
#export ASCEND_DEVICE_ID=4

# Basic parameters; review/adjust per model.
# Network name, same as the directory name.
Network="roberta_ID0763_for_TensorFlow"
# Training batch size.
batch_size=32


# TF2.X-specific; review/adjust per model.
# NOTE(review): ${train_steps} is never defined anywhere in this script,
# so NPU_LOOP_SIZE is exported as an empty string — confirm whether a
# train_steps value should be set above (e.g. matching --num_train_steps=200).
export NPU_LOOP_SIZE=${train_steps}

# Diagnostic parameters; precision_mode should be reviewed per model.
precision_mode="allow_mix_precision"
# Maintenance parameters; the defaults below need no modification
# (each may be overridden via the corresponding --flag=value argument).
over_dump=False
data_dump_flag=False
data_dump_step="10"
profiling=False

# Help message; do not modify.
# Bug fix: the original `echo"usage:..."` had no space after `echo`, so
# bash tried to run a command literally named
# `echousage:./train_performance_1P.sh <args>` instead of printing usage.
if [[ $1 == --help || $1 == -h ]];then
    echo "usage:./train_performance_1P.sh <args>"
    echo " "
    echo "parameter explain:
    --precision_mode         precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
    --over_dump                    if or not over detection, default is False
    --data_dump_flag                 data dump flag, default is False
    --data_dump_step                 data dump step, default is 10
    --profiling                    if or not profiling for performance debug, default is False
    --data_path                    source data of training
    --ckpt_path                         model
    -h/--help                        show help message
    "
    exit 1
fi

# Argument parsing; do not modify.
# Flags have the form --name=value; ${para#*=} strips everything up to and
# including the first '=' to extract the value.  Assigning the expansion
# directly (instead of the original `echo` inside backticks) removes a
# useless subshell and the word-splitting/globbing that an unquoted echo
# would apply to values containing spaces or wildcard characters.
for para in $*
do
    if [[ $para == --precision_mode* ]];then
        precision_mode=${para#*=}
    elif [[ $para == --over_dump* ]];then
        over_dump=${para#*=}
        over_dump_path=${cur_path}/test/output/overflow_dump
        mkdir -p "${over_dump_path}"
    elif [[ $para == --data_dump_flag* ]];then
        data_dump_flag=${para#*=}
        data_dump_path=${cur_path}/test/output/data_dump
        mkdir -p "${data_dump_path}"
    elif [[ $para == --data_dump_step* ]];then
        data_dump_step=${para#*=}
    elif [[ $para == --profiling* ]];then
        profiling=${para#*=}
        profiling_dump_path=${cur_path}/test/output/profiling
        mkdir -p "${profiling_dump_path}"
    elif [[ $para == --data_path* ]];then
        data_path=${para#*=}
    elif [[ $para == --ckpt_path* ]];then
        ckpt_path=${para#*=}
    fi
done
# # Check that data_path was passed in (currently disabled); do not modify.
# # Note: fixed missing spaces inside [[ ]] so this works if re-enabled.
# if [[ $data_path == "" ]];then
#     echo "[Error] para \"data_path\" must be config"
#     exit 1
# fi

# Record the training start time; do not modify.
start_time=$(date +%s)

# Enter the training script directory; review/adjust per model.
# Abort if the directory is missing — continuing would run everything
# in the wrong place.
cd "$cur_path"/ || exit

# (Re)create the per-device output directory; do not modify.
# Both branches of the original if/else ended in the same mkdir, so the
# logic collapses to: remove any stale directory, then create a fresh one.
if [ -d "${cur_path}/test/output/${ASCEND_DEVICE_ID}" ];then
    rm -rf "${cur_path}/test/output/${ASCEND_DEVICE_ID}"
fi
mkdir -p "${cur_path}/test/output/${ASCEND_DEVICE_ID}/ckpt"

# Launch the pre-training job; the argument forwarding and log redirection
# below need no modification, other hyper-parameters should be reviewed
# per model.
cd "$cur_path/pretrain"
# --train_batch_size reuses the batch_size variable declared at the top of
# the script (previously hard-coded to the same value, 32), so the run
# stays consistent with the BatchSize reported in the summary log.
python3 run_pretraining.py \
  --bert_config_file=$cur_path/Data/Roberta-large/config.json \
  --max_seq_length=128 \
  --max_predictions_per_seq=20 \
  --train_batch_size=${batch_size} \
  --learning_rate=2e-5 \
  --num_warmup_steps=100 \
  --num_train_steps=200 \
  --optimizer_type=adam \
  --manual_fp16=True \
  --use_fp16_cls=True \
  --input_files_dir=$data_path/Data/BookCorpus/pretraining_data \
  --eval_files_dir=$eval_data_path \
  --npu_bert_debug=False \
  --npu_bert_use_tdt=True \
  --do_train=True \
  --num_accumulation_steps=1 \
  --npu_bert_job_start_file= \
  --iterations_per_loop=20 \
  --save_checkpoints_steps=100 \
  --npu_bert_clip_by_global_norm=False \
  --distributed=False \
  --npu_bert_loss_scale=0 \
  --output_dir=$cur_path/model \
  --out_log_dir=$cur_path/model/loss > ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1
wait
echo "---------------SQuADv1.1----------------------"
cd $cur_path/downstream
# Reference paths from the original development machine (kept for context):
# BERT_BASE_DIR=/home/TestUser03/bupt_lil/Model/Roberta-large
# SQUAD_DIR=/home/TestUser03/bupt_lil/Data/SQuAD/data
# OUT_DIR=/home/TestUser03/bupt_lil/Output/Roberta/modelzoo/v1-384
# Fine-tune and predict on SQuAD v1.1; stdout/stderr are captured in the
# per-device test1_*.log.
python3 run_squad.py \
  --vocab_file=$cur_path/Data/Roberta-large/vocab.json \
  --merges_file=$cur_path/Data/Roberta-large/merges.txt \
  --bert_config_file=$cur_path/Data/Roberta-large/config.json \
  --init_checkpoint=$data_path/Data/Roberta-large/tf_model/roberta_large.ckpt \
  --do_train=True \
  --train_file=$data_path/Data/SQuAD/data/train-v1.1.json \
  --do_predict=True \
  --predict_file=$data_path/Data/SQuAD/data/dev-v1.1.json \
  --train_batch_size=4 \
  --predict_batch_size=32 \
  --learning_rate=3e-5 \
  --num_train_epochs=2.0 \
  --max_seq_length=512 \
  --doc_stride=128 \
  --output_dir=$cur_path/model/Roberta/modelzoo/v1-384 \
  --version_2_with_negative=False > ${cur_path}/test/output/${ASCEND_DEVICE_ID}/test1_${ASCEND_DEVICE_ID}.log 2>&1
wait
# Run the official SQuAD v1.1 evaluation script on the generated
# predictions.json; its exact_match/f1 output in test11_*.log is parsed
# later for the reported train accuracy.
python3 $data_path/Data/SQuAD/data/evaluate-v1.1.py $data_path/Data/SQuAD/data/dev-v1.1.json $cur_path/model/Roberta/modelzoo/v1-384/predictions.json > ${cur_path}/test/output/${ASCEND_DEVICE_ID}/test11_${ASCEND_DEVICE_ID}.log 2>&1
wait
echo "---------------SQuADv2.0----------------------"
# Reference paths from the original development machine (kept for context):
# BERT_BASE_DIR=/home/TestUser03/bupt_lil/Model/Roberta-large
# SQUAD_DIR=/home/TestUser03/bupt_lil/Data/SQuAD/data
# OUT_DIR=/home/TestUser03/bupt_lil/Output/Roberta/modelzoo/v2-512
cd $cur_path/downstream
# Fine-tune and predict on SQuAD v2.0 (includes unanswerable questions,
# hence --version_2_with_negative=True); output goes to test2_*.log.
python3 run_squad.py \
  --vocab_file=$cur_path/Data/Roberta-large/vocab.json \
  --merges_file=$cur_path/Data/Roberta-large/merges.txt \
  --bert_config_file=$cur_path/Data/Roberta-large/config.json \
  --init_checkpoint=$data_path/Data/Roberta-large/tf_model/roberta_large.ckpt \
  --do_train=True \
  --train_file=$data_path/Data/SQuAD/data/train-v2.0.json \
  --do_predict=True \
  --predict_file=$data_path/Data/SQuAD/data/dev-v2.0.json \
  --train_batch_size=4 \
  --predict_batch_size=32 \
  --learning_rate=3e-5 \
  --num_train_epochs=2.0 \
  --max_seq_length=512 \
  --doc_stride=128 \
  --output_dir=$cur_path/model/Roberta/modelzoo/v2-512 \
  --version_2_with_negative=True  > ${cur_path}/test/output/${ASCEND_DEVICE_ID}/test2_${ASCEND_DEVICE_ID}.log 2>&1

wait
# Official SQuAD v2.0 evaluation, including the no-answer probability
# file (null_odds.json) produced by run_squad.py.
python3 $data_path/Data/SQuAD/data/evaluate-v2.0.py $data_path/Data/SQuAD/data/dev-v2.0.json $cur_path/model/Roberta/modelzoo/v2-512/predictions.json \
  --na-prob-file $cur_path/model/Roberta/modelzoo/v2-512/null_odds.json > ${cur_path}/test/output/${ASCEND_DEVICE_ID}/test22_${ASCEND_DEVICE_ID}.log 2>&1
wait

# Training end time; do not modify.
end_time=$(date +%s)
e2e_time=$(( $end_time - $start_time ))

# Print results; do not modify.
echo "------------------ Final result ------------------"
# Extract performance figures (FPS); review the grep patterns and awk
# field numbers per model — they assume a specific training-log layout
# ('time' value in field 9 of its last matching line, 'Throughput' value
# in field 6).
# NOTE(review): if the log format changes, these silently yield empty
# strings rather than failing — confirm against an actual training log.
TrainingTime=`grep 'time' $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $9}'`
FPS=`grep 'Throughput' $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $6}'`
# Print; do not modify.
echo "Final Performance TrainingTime : $TrainingTime"
echo "Final Performance images/sec : $FPS"

# Extract training accuracy; review per model.
# Takes the token after 'exact_match' from the SQuAD v1.1 evaluation log
# (test11_*.log) and strips a trailing comma.
train_accuracy=`grep exact_match $cur_path/test/output/${ASCEND_DEVICE_ID}/test11_${ASCEND_DEVICE_ID}.log|awk '{print $2}'|awk -F ',' '{print $1}'`

# Print; do not modify.
echo "Final Train Accuracy : ${train_accuracy}"
echo "E2E Training Duration sec : $e2e_time"

#性能看护结果汇总
#训练用例信息，不需要修改
BatchSize=${batch_size}
DeviceType=`uname -m`
CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc'

##获取性能数据，不需要修改
#吞吐量
ActualFPS=${FPS}
#单迭代训练时长
#TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${FPS}'/69}'`

#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中，需要根据模型审视
grep 'Average' $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk  '{print $24}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
#最后一个迭代loss值，不需要修改
ActualLoss=`awk 'END {print}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`

#关键信息打印到${CaseName}.log中，不需修改
echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log