#!/bin/bash

##################### Environment variables & dependency setup #####################
# NOTE(review): commonly set to 1 for Megatron-style workloads — confirm this
# is the value required by this stack.
export CUDA_DEVICE_MAX_CONNECTIONS=1
# Expose Ascend NPUs 0-3 to this process (4 devices, matching NPUS_PER_NODE below).
export ASCEND_RT_VISIBLE_DEVICES=0,1,2,3
# Prepend local and conda library dirs so the runtime resolves shared objects.
export LD_LIBRARY_PATH=/usr/local/lib:/root/miniconda3/lib:$LD_LIBRARY_PATH
# HCCL connection timeout — presumably seconds; verify against HCCL docs.
export HCCL_CONNECT_TIMEOUT=1200
# NOTE(review): looks like an Ascend operator-combination/fusion switch — confirm.
export COMBINED_ENABLE=1

# Adjust this path to match your environment (loads Ascend CANN toolkit env).
source /usr/local/Ascend/ascend-toolkit/set_env.sh

##################### Paths / parameter definitions #####################
# 1) Pretrained model and tokenizer locations
TOKENIZER_PATH="/work/cache/MindSpeed-LLM/shakechen/Llama-2-7b-hf/"
CHECKPOINT="/work/cache/MindSpeed-LLM/model_weights/llama2-7b-lora2mcore2/"

# 2) Evaluation task and its data directory
DATA_PATH="/work/cache/MindSpeed-LLM/mmlu/test/"
TASK="mmlu"

# 3) Distributed launch configuration (single node, 4 NPUs)
MASTER_ADDR=localhost
MASTER_PORT=6033
NNODES=1
NODE_RANK=0
NPUS_PER_NODE=4
# Assemble the launcher flags piece by piece; the resulting string is
# expanded unquoted at the call site so it word-splits into separate flags.
DISTRIBUTED_ARGS="--nproc_per_node ${NPUS_PER_NODE}"
DISTRIBUTED_ARGS+=" --nnodes ${NNODES}"
DISTRIBUTED_ARGS+=" --node_rank ${NODE_RANK}"
DISTRIBUTED_ARGS+=" --master_addr ${MASTER_ADDR}"
DISTRIBUTED_ARGS+=" --master_port ${MASTER_PORT}"

##################### Loop count #####################
LOOP_COUNT=60

# Initialize the iteration pointer file before the loop starts.
# 2300 is a fixed value: the iteration of the checkpoint that scored best in
# earlier evaluations of this CHECKPOINT.
INIT_ITER=2300
# Ensure the target directory exists — the bare redirect below would otherwise
# fail with "No such file or directory" on a fresh workspace.
mkdir -p saves
echo "$INIT_ITER" > saves/latest_checkpointed_iteration.txt
echo "已将 saves/latest_checkpointed_iteration.txt 初始化为: $INIT_ITER"



# Main loop: repeatedly merge the latest LoRA weights into an mcore checkpoint,
# evaluate it on $TASK, then advance the iteration pointer by 100 so the next
# pass picks up the next checkpoint.
for ((i = 1; i <= LOOP_COUNT; i++)); do
  echo "========== 开始第 $i/$LOOP_COUNT 次循环 =========="

  # Step 1: merge LoRA weights from saves/ into the mcore base model and write
  # the result under ./model_weights/llama2-7b-lora2mcore2 (the evaluation
  # below loads from the matching ${CHECKPOINT} path).
  python convert_ckpt.py \
    --model-type GPT \
    --load-model-type mg \
    --save-model-type mg \
    --load-dir /work/cache/MindSpeed-LLM/shakechen/Llama-2-7b-hf-mcore2/ \
    --lora-load /work/cache/MindSpeed-LLM/saves/ \
    --lora-r 8 \
    --lora-alpha 32 \
    --lora-target-modules linear_q linear_k linear_v linear_proj linear_fc1 linear_fc2 \
    --target-tensor-parallel-size 1 \
    --target-pipeline-parallel-size 1 \
    --save-dir ./model_weights/llama2-7b-lora2mcore2 \
    --use-mcore-models \
    --model-type-hf llama2

  # Step 2: timestamp used to make this run's log file name unique.
  TIMESTAMP=$(date +"%Y%m%d_%H%M%S")

  # Step 3: read the iteration value the checkpoint loader will pick up.
  LATEST_CKPT=$(cat saves/latest_checkpointed_iteration.txt)
  echo "读取到 saves/latest_checkpointed_iteration.txt 内容: $LATEST_CKPT"

  # Step 4: run the evaluation, teeing output to a log named after the task,
  # checkpoint iteration, and timestamp. tee fails if logs/ is missing, so
  # create it first.
  mkdir -p logs
  # $DISTRIBUTED_ARGS is deliberately left unquoted so it word-splits into
  # separate launcher flags.
  python -m torch.distributed.launch $DISTRIBUTED_ARGS evaluation.py \
         --task-data-path "$DATA_PATH" \
         --task "$TASK" \
         --seq-length 4096 \
         --max-new-tokens 1 \
         --evaluation-batch-size 1 \
         --max-position-embeddings 4096 \
         --tensor-model-parallel-size 1 \
         --pipeline-model-parallel-size 1 \
         --num-layers 32 \
         --hidden-size 4096 \
         --ffn-hidden-size 11008 \
         --num-attention-heads 32 \
         --swiglu \
         --disable-bias-linear \
         --load "${CHECKPOINT}" \
         --normalization RMSNorm \
         --tokenizer-type PretrainedFromHF \
         --tokenizer-name-or-path "${TOKENIZER_PATH}" \
         --tokenizer-not-use-fast \
         --bf16 \
         --micro-batch-size 4 \
         --use-fused-rmsnorm \
         --position-embedding-type rope \
         --exit-on-missing-checkpoint \
         --no-load-rng \
         --no-load-optim \
         --untie-embeddings-and-output-weights \
         --no-masked-softmax-fusion \
         --make-vocab-size-divisible-by 1 \
         --use-mcore-models \
         --seed 42 \
  | tee "logs/evaluation_llama2_7b_${TASK}_${LATEST_CKPT}_${TIMESTAMP}.log"

  # Step 5: advance the iteration pointer by 100 for the next pass.
  NEW_CKPT=$((LATEST_CKPT + 100))
  echo "$NEW_CKPT" > saves/latest_checkpointed_iteration.txt
  echo "已更新 latest_checkpointed_iteration.txt 为: $NEW_CKPT"

  echo "========== 第 $i/$LOOP_COUNT 次循环结束 =========="
  echo
done

echo "脚本已完成全部 $LOOP_COUNT 次循环！"
