#!/bin/bash
# Convert a Llama-3 checkpoint between Megatron-Core and HuggingFace formats
# (direction controlled by mg2hf below). Abort on the first failing command;
# pipefail makes a failing pipeline stage fatal as well.
set -eo pipefail

START_TIME=$SECONDS

# Activate the conda environment providing python 3.10 / torch 2.5 / CUDA.
# The command is echoed first so the log shows exactly what was run.
conda_cmd="source activate && conda activate python310_torch25_cuda"
echo "${conda_cmd}"
eval "${conda_cmd}"

# Resolve the directory containing this script, then step up four levels to
# the repository root. Quoted throughout so paths with spaces survive.
ROOT_DIR=$(dirname -- "$(readlink -f -- "$0")")
ROOT_DIR=${ROOT_DIR}/../../../.. #zj-megatron

# Load the XPU build/runtime environment shipped with the repo.
source_cmd="source ${ROOT_DIR}/zj-megatron/zj_examples/xpu_env.sh"
echo "${source_cmd}"
eval "${source_cmd}"
# NOTE(review): presumably set by xpu_env.sh; explicitly disabled for the
# conversion run — confirm against xpu_env.sh.
unset USE_FAST_BF16_FC

MODEL_SIZE=70B
MEGATRON_PATH=${ROOT_DIR}
# Make both the patch layer and the vendored Megatron importable.
export PYTHONPATH=${MEGATRON_PATH}/zj-megatron:${MEGATRON_PATH}/zj-megatron/Megatron
# NOTE(review): limiting per-device kernel connections is the usual Megatron
# requirement for overlap correctness — confirm for this conversion path.
export CUDA_DEVICE_MAX_CONNECTIONS=1

# Parallel layout; both overridable from the caller's environment.
TP=${TP:-8}   # tensor-model-parallel size
PP=${PP:-8}   # pipeline-model-parallel size

# Checkpoint locations, laid out relative to the repo root.
HF_CKPT_PATH=${ROOT_DIR}/../model/Meta-Llama-3-70B
SOURCE_CKPT_PATH=${ROOT_DIR}/../ckpt/Meta-Llama-3-70B-mcore-TP-${TP}-PP-${PP}
TARGET_CKPT_PATH=${ROOT_DIR}/../ckpt/Meta-Llama-3-70B-hf

PR=bf16      # target parameter precision
TE=true      # use the Transformer Engine implementation
mg2hf=true   # conversion direction: megatron-core -> huggingface

# Mixture-of-experts knobs — all disabled for this run.
EXTRA_VOCAB_SIZE=256
NUM_EXPERTS=0
EXPERTS_TOPK=0
EP=1
NUM_EXPERT_SPLITS=0

# Single-node launcher settings.
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
MASTER_ADDR=${MASTER_ADDR:-127.0.0.1}
MASTER_PORT=${MASTER_PORT:-9988}

DISTRIBUTED_ARGS="--nproc_per_node 1 --nnodes 1 --node_rank 0 --master_addr ${MASTER_ADDR} --master_port ${MASTER_PORT}"

# Architecture hyper-parameters per model size. The original had no final
# else: an unrecognized MODEL_SIZE fell through with every architecture
# variable unset — now it aborts loudly instead.
if [ "$MODEL_SIZE" = 70B ]; then

NUM_LAYERS=80
HIDDEN_SIZE=8192
NUM_ATTN_HEADS=64
INTERMEDIATE_SIZE=28672
NUM_KV_HEADS=8
MAX_POSITION_EMBEDDINGS=131072
VOCAB_SIZE=128256
ROPE_THETA=500000
gqa_options=" \
		    --group-query-attention \
		    --num-query-groups 8"

# NOTE(review): CPU-side weight init presumably avoids GPU OOM at 70B — confirm.
cpu_options=" \
            --use-cpu-initialization"

elif [ "$MODEL_SIZE" = 8B ]; then

NUM_LAYERS=32
HIDDEN_SIZE=4096
NUM_ATTN_HEADS=32
INTERMEDIATE_SIZE=14336
NUM_KV_HEADS=8
MAX_POSITION_EMBEDDINGS=8192
VOCAB_SIZE=128256
ROPE_THETA=500000

gqa_options=" \
		    --group-query-attention \
		    --num-query-groups 8"

cpu_options=""

else
    echo "ERROR: unsupported MODEL_SIZE '${MODEL_SIZE}' (expected 70B or 8B)" >&2
    exit 1
fi

# When each expert is split into N shards, every shard carries 1/N of the FFN width.
if [ "$NUM_EXPERT_SPLITS" -gt 0 ]; then
    INTERMEDIATE_SIZE=$(( INTERMEDIATE_SIZE / NUM_EXPERT_SPLITS ))
fi

# Extra converter flags for Mixture-of-Experts checkpoints; empty for dense models.
if [ "$NUM_EXPERTS" -gt 0 ]; then
    expert_options="
                --moe-router-topk ${EXPERTS_TOPK} \
                --num-experts ${NUM_EXPERTS} \
                --expert-model-parallel-size 1 \
                --target_expert_model_parallel_size ${EP} \
                --num_expert_split_size ${NUM_EXPERT_SPLITS} \
    "
else
    expert_options=""
fi

# Direction flag: present => megatron-core -> huggingface; absent => the reverse.
if [ "$mg2hf" = true ]; then
    convert_options="
                --convert_checkpoint_from_megatron_to_transformers
    "
else
    # The original only matched the literal 'false', leaving convert_options
    # unset for any other value; treat every non-'true' value as hf->mcore.
    convert_options=""
fi

# template_json="./hf_llama_moe/config_TEMPLATE.json"
# config_json="./hf_llama_moe/config.json"
# sed "s/CONFIG_HIDDEN_SIZE/${HIDDEN_SIZE}/" ${template_json} \
#     | sed "s/CONFIG_INTERMEDIATE_SIZE/${INTERMEDIATE_SIZE}/" \
#     | sed "s/CONFIG_ATTENTION_HEADS/${NUM_ATTN_HEADS}/" \
#     | sed "s/CONFIG_HIDDEN_LAYERS/${NUM_LAYERS}/" \
#     | sed "s/CONFIG_NUM_EXPERTS/${NUM_EXPERTS}/" \
#     | sed "s/CONFIG_EXPERTS_topk/${EXPERTS_TOPK}/" \
#     | sed "s/CONFIG_KV_HEADS/${NUM_KV_HEADS}/" \
#     | sed "s/CONFIG_VOCAB_SIZE/${VOCAB_SIZE}/" \
#     | sed "s/CONFIG_ROPE_THETA/${ROPE_THETA}/" \
# 	  > ${config_json}

# Transformer implementation selector. The original matched only the literal
# 'false' in the elif, leaving te_options unset for any other value; any
# non-'true' value now falls back to the local implementation.
if [ "$TE" = true ]; then
    te_options=" \
		    --transformer-impl transformer_engine"
else
    te_options=" \
        --transformer-impl local"
fi

# Precision flags. The 70B converter takes the dtype as an explicit argument
# instead of a --fp16/--bf16 mixed-precision switch. An unrecognized PR used
# to fall through with pr_options unset — now it aborts loudly.
if [ "$PR" = fp16 ]; then
    if [ "$MODEL_SIZE" != 70B ]; then
        pr_options=" \
		        --fp16"
    else
        pr_options=" \
		        --target_params_dtype fp16"
    fi
elif [ "$PR" = bf16 ]; then
    if [ "$MODEL_SIZE" != 70B ]; then
        pr_options=" \
		        --bf16"
    else
        pr_options=" \
		        --target_params_dtype bf16"
    fi
else
    echo "ERROR: unsupported PR '${PR}' (expected fp16 or bf16)" >&2
    exit 1
fi

# Launch the converter. NOTE(review): in the non-70B branch the 1-valued size
# arguments (--num-layers 1, --hidden-size 1, ...) look like placeholders the
# converter replaces from the HF config — confirm against hf2mcore.py.
# ${DISTRIBUTED_ARGS} and the *_options variables are intentionally unquoted
# so they word-split into separate CLI arguments.
if [ "$MODEL_SIZE" != 70B ]; then
torchrun ${DISTRIBUTED_ARGS} hf2mcore.py \
    --load_path ${SOURCE_CKPT_PATH} \
    --save_path ${TARGET_CKPT_PATH} \
    --load ${HF_CKPT_PATH} \
    --huggingface_model_path ${HF_CKPT_PATH} \
    --megatron-path ${MEGATRON_PATH} \
    --target_tensor_model_parallel_size ${TP} \
    --target_pipeline_model_parallel_size ${PP} \
    --micro-batch-size 1 \
    --swiglu \
    --num-layers 1 \
    --hidden-size 1 \
    --ffn-hidden-size 1 \
    --norm-epsilon 1e-5 \
    --num-attention-heads 1 \
    --max-position-embeddings 1 \
    --seq-length 1 \
    --no-async-tensor-model-parallel-allreduce \
    --patch-tokenizer-type LLama3Tokenizer \
    --extra-vocab-size ${EXTRA_VOCAB_SIZE} \
    --untie-embeddings-and-output-weights \
    --no-rope-fusion \
    --use-rotary-position-embeddings \
    --disable-bias-linear \
    --normalization RMSNorm \
    --use-mcore-models \
    --attention-dropout 0.0 \
    --hidden-dropout 0.0 \
    ${pr_options} \
    ${expert_options} \
    ${convert_options} \
    ${gqa_options} \
    ${te_options} \
    ${cpu_options}

else
# 70B has a dedicated conversion script; build the command as a string so it
# can be logged before execution.
cmd="torchrun ${DISTRIBUTED_ARGS} hf2mcore_70b.py \
  --load ${HF_CKPT_PATH} \
  --megatron-path ${MEGATRON_PATH} \
  --load_path ${SOURCE_CKPT_PATH} \
  --save_path ${TARGET_CKPT_PATH} \
  --target_tensor_model_parallel_size ${TP} \
  --target_pipeline_model_parallel_size ${PP} \
${pr_options} \
${convert_options}"

echo "${cmd}"
eval "${cmd}"
fi

# Report total wall-clock runtime of the script.
ELAPSED_TIME=$(( SECONDS - START_TIME ))
printf '%s min %s sec\n' "$(( ELAPSED_TIME / 60 ))" "$(( ELAPSED_TIME % 60 ))"