#!/bin/bash
# Convert a Megatron-core (mcore) checkpoint to a TP=1/PP=1 layout on Ascend NPUs.
# Requires the Ascend CANN toolkit and ATB library to be installed under
# /usr/local/Ascend.
#
# Fail fast on command errors and failed pipeline stages.
set -eo pipefail

# Serialize CUDA/NPU kernel-launch connections (required by Megatron-style runs).
export CUDA_DEVICE_MAX_CONNECTIONS=1

# Ascend toolkit + ATB environment setup. Sourced before `set -u` because
# vendor set_env.sh scripts may reference unset variables internally.
source /usr/local/Ascend/ascend-toolkit/set_env.sh
source /usr/local/Ascend/nnal/atb/set_env.sh
set -u

# Root directory of the tuned model run. Override via environment, e.g.:
#   MODEL_ROOT_DIR=./outputs/Llama-2-7b/tuned_with_Infinity_Instruct_1M ./convert.sh
# Previously used roots (kept for reference): tuned_with_speechless_thoughts_252k,
# tuned_with_Infinity_Instruct_{50k,250k,1M,3M,9M}, tuned_with_MindSpeed_Infinity_Instruct_7M.
MODEL_ROOT_DIR=${MODEL_ROOT_DIR:-./outputs/Llama-2-7b/tuned_with_MindSpeed_Infinity_Instruct_7M_4npu}

# Checkpoint step to convert. Must match an existing iter_<step> directory under
# ${MODEL_ROOT_DIR}/ckpt and be updated in latest_checkpointed_iteration.txt.
# Override via environment (previously used: 750, 2000-10000, 18000, 27000).
CKPT_STEP=${CKPT_STEP:-21000}

# Source checkpoint directory and destination for the converted 1x1 model.
CHECKPOINT=${MODEL_ROOT_DIR}/ckpt
TARGET_MODEL=${MODEL_ROOT_DIR}/1x1/iter_${CKPT_STEP}

# Re-shard the Megatron (mg) checkpoint to tensor-parallel=1 / pipeline-parallel=1
# so it can be consumed as a single-rank model. All expansions quoted so paths
# containing spaces or glob characters survive word-splitting (SC2086).
python convert_ckpt.py \
    --model-type GPT \
    --load-model-type mg \
    --save-model-type mg \
    --use-mcore-models \
    --target-tensor-parallel-size 1 \
    --target-pipeline-parallel-size 1 \
    --load-dir "${CHECKPOINT}" \
    --ckpt-step "${CKPT_STEP}" \
    --save-dir "${TARGET_MODEL}"
