#!/bin/bash
#set -x
# Resolve this script's directory and the project root (one level up).
SCRIPT_DIR=$(dirname "$(realpath "$0")")
PROJECT_DIR=$(realpath "$SCRIPT_DIR/..")
echo "SCRIPT_DIR=$SCRIPT_DIR"
echo "PROJECT_DIR=$PROJECT_DIR"

# Show which python interpreter will be used.
command -v python
# Prepend the project root to PYTHONPATH. The ${VAR:+...} form avoids a
# trailing ':' when PYTHONPATH is unset/empty (a trailing colon silently
# adds the current directory to Python's module search path).
export PYTHONPATH=${PROJECT_DIR}${PYTHONPATH:+:${PYTHONPATH}}
echo "${PYTHONPATH}"

# Kill (SIGKILL) every process whose ps -ef line matches $1.
# NOTE(review): this shadows the system `killall` utility inside this
# script; the name is kept so existing callers keep working.
function killall {
    local pids
    # Quote the pattern and use $() instead of backticks; 'grep -v grep'
    # drops the grep process itself from the match.
    pids=$(ps -ef | grep -- "$1" | grep -v grep | awk '{print $2}')
    echo "$pids"
    # Guard: without this, an empty match ran 'kill -9' with no arguments.
    if [ -n "$pids" ]; then
        echo "$pids" | xargs kill -9
    fi
}

# Print the usage synopsis to stderr (diagnostics belong on stderr, not
# stdout) and abort with status 1.
usage() {
  echo "Usage: ${0} [-t|--train_stage] [-m|--model_size] [-l|--learning_rate] [-w|--num_warmup_steps] [-e|--num_train_epochs] [-x|--max_seq_len] [-d|--data_path] [-i|--images_path] [--images_lmdb_path] [-c|--load_ckpt_path] [-o|--save_dir] [--gradient_accumulation_steps] [-b|--train_micro_batch_size_per_gpu] [-z|--zero_stage]" >&2
  exit 1
}

# Defaults for every CLI-settable parameter (empty string = not provided).
TRAIN_STAGE=""
MODEL_SIZE=""
LEARNING_RATE=""
NUM_WARMUP_STEPS=""
NUM_TRAIN_EPOCHS=""
MAX_SEQ_LEN=""
DATA_PATH=""
IMAGES_PATH=""
IMAGES_LMDB_PATH=""
LOAD_CKPT_PATH=""
SAVE_DIR=""
TRAIN_MICRO_BATCH_SIZE_PER_GPU=""
# BUG FIX: parsed via --gradient_accumulation_steps below but previously
# never initialized here, unlike every other option.
GRADIENT_ACCUMULATION_STEPS=""
ZERO_STAGE=""

# Parse command-line options. Every option takes one value and consumes
# two positional arguments; anything unrecognized prints usage and exits.
while [[ $# -gt 0 ]]; do
  key=${1}
  case ${key} in
    -t|--train_stage)
      TRAIN_STAGE=${2}
      shift 2
      ;;
    -m|--model_size)
      MODEL_SIZE=${2}
      shift 2
      ;;
    -l|--learning_rate)
      LEARNING_RATE=${2}
      shift 2
      ;;
    -w|--num_warmup_steps)
      NUM_WARMUP_STEPS=${2}
      shift 2
      ;;
    -e|--num_train_epochs)
      NUM_TRAIN_EPOCHS=${2}
      shift 2
      ;;
    -x|--max_seq_len)
      MAX_SEQ_LEN=${2}
      shift 2
      ;;
    -d|--data_path)
      DATA_PATH=${2}
      shift 2
      ;;
    -i|--images_path)
      IMAGES_PATH=${2}
      shift 2
      ;;
    # BUG FIX: this case previously also listed '-i', duplicating the -i of
    # --images_path above; since the earlier arm always matched first,
    # --images_lmdb_path could never be set via -i. Long option only now,
    # matching the usage string.
    --images_lmdb_path)
      IMAGES_LMDB_PATH=${2}
      shift 2
      ;;
    -c|--load_ckpt_path)
      LOAD_CKPT_PATH=${2}
      shift 2
      ;;
    -o|--save_dir)
      SAVE_DIR=${2}
      shift 2
      ;;
    -b|--train_micro_batch_size_per_gpu)
      TRAIN_MICRO_BATCH_SIZE_PER_GPU=${2}
      shift 2
      ;;
    --gradient_accumulation_steps)
      GRADIENT_ACCUMULATION_STEPS=${2}
      shift 2
      ;;
    -z|--zero_stage)
      ZERO_STAGE=${2}
      shift 2
      ;;
    *)
      usage  # exits 1; the old unreachable 'shift' after it was dropped
      ;;
  esac
done

# Required-parameter validation is intentionally disabled for now.
# if [[ -z "${TRAIN_STAGE}" || -z "${MODEL_SIZE}" || -z "${LEARNING_RATE}" || -z "${NUM_WARMUP_STEPS}" || -z "${NUM_TRAIN_EPOCHS}" || -z "${MAX_SEQ_LEN}" || -z "${SAVE_DIR}" || -z "${TRAIN_MICRO_BATCH_SIZE_PER_GPU}" || -z "${ZERO_STAGE}" ]]; then
#   echo "Error: Missing required parameters."
#   usage
# fi

# Print the values (for debugging purposes)
echo "TRAIN_STAGE: $TRAIN_STAGE"
echo "MODEL_SIZE: $MODEL_SIZE"
echo "LEARNING_RATE: $LEARNING_RATE"
echo "NUM_WARMUP_STEPS: $NUM_WARMUP_STEPS"
echo "NUM_TRAIN_EPOCHS: $NUM_TRAIN_EPOCHS"
echo "MAX_SEQ_LEN: $MAX_SEQ_LEN"
echo "DATA_PATH: $DATA_PATH"
echo "IMAGES_PATH: $IMAGES_PATH"
echo "IMAGES_LMDB_PATH: $IMAGES_LMDB_PATH"
echo "LOAD_CKPT_PATH: $LOAD_CKPT_PATH"
echo "SAVE_DIR: $SAVE_DIR"
echo "TRAIN_MICRO_BATCH_SIZE_PER_GPU: $TRAIN_MICRO_BATCH_SIZE_PER_GPU"
echo "GRADIENT_ACCUMULATION_STEPS: $GRADIENT_ACCUMULATION_STEPS"
echo "ZERO_STAGE: $ZERO_STAGE"

# Select the model hyper-parameters for the requested model size.
if [[ ${MODEL_SIZE} == "1B-dense" ]]; then
    # Model Argument
    ## Dense Model Argument
    DIM=2048
    N_LAYERS=24
    N_HEADS=8
    N_KV_HEADS=2
    VOCAB_SIZE=6400
    HIDDEN_DIM=-1 # 11008  # decided by MULTIPLE_OF if FFN hidden dim is -1
    MULTIPLE_OF=64
    NORM_EPS=1e-5
    ROPE_THETA=100000
    DROPOUT=0.0
    FLASH_ATTN=true

    ## MOE model Argument
    USE_MOE=false
    NUM_EXPERTS_PER_TOK=2
    N_ROUTED_EXPERTS=4
    N_SHARED_EXPERTS=true
    SCORING_FUNC=softmax
    AUX_LOSS_ALPHA=0.1
    SEQ_AUX=true
    NORM_TOPK_PROB=true

    # Train Argument
    LORA_RANK=160
else
    # BUG FIX: previously this only warned and fell through with every
    # model argument unset, producing a confusing torchrun/argparse failure
    # much later. Abort early instead.
    echo "invalid MODEL_SIZE" >&2
    exit 1
fi

## hyper-parameter
#LEARNING_RATE=1e-4
LR_SCHEDULER_TYPE=cosine
#NUM_WARMUP_STEPS=2000
#NUM_TRAIN_EPOCHS=8

SEED=1
DEVICE=cuda

## data config
#TOKENIZER_PATH=$PROJECT_DIR/resource/tokenizer/minimind_video_tokenizer
TOKENIZER_PATH=$PROJECT_DIR/resource/tokenizer/minimind_tokenizer
NUM_WORKERS=8

# Accelerate Argument
## Deepspeed
#TRAIN_MICRO_BATCH_SIZE_PER_GPU=5
#GRADIENT_ACCUMULATION_STEPS=8
GRADIENT_CHECKPOINTING=true
DTYPE=fp16
GRADIENT_CLIPPING=1
# BUG FIX: was 'ZERO_STAGE=1', which unconditionally clobbered whatever the
# user passed via -z/--zero_stage. Treat 1 as a default only.
ZERO_STAGE=${ZERO_STAGE:-1}
STEP_PER_PRINT=1
WALL_CLOCK_BREAKDOWN=false

# Assemble the torchrun invocation as a single string; boolean flags are
# appended conditionally below. (The backslash-newlines inside the double
# quotes are line continuations, so this is one long line.)
command="torchrun --nproc_per_node 8 train_deepspeed.py \
        --dim ${DIM} \
        --n_layers ${N_LAYERS} \
        --n_heads ${N_HEADS} \
        --n_kv_heads ${N_KV_HEADS} \
        --vocab_size ${VOCAB_SIZE} \
        --hidden_dim ${HIDDEN_DIM} \
        --multiple_of ${MULTIPLE_OF} \
        --norm_eps ${NORM_EPS} \
        --max_seq_len ${MAX_SEQ_LEN} \
        --rope_theta ${ROPE_THETA} \
        --dropout ${DROPOUT} \
        --num_experts_per_tok ${NUM_EXPERTS_PER_TOK} \
        --n_routed_experts ${N_ROUTED_EXPERTS} \
        --scoring_func ${SCORING_FUNC} \
        --aux_loss_alpha ${AUX_LOSS_ALPHA} \
        --learning_rate ${LEARNING_RATE} \
        --gradient_accumulation_steps ${GRADIENT_ACCUMULATION_STEPS} \
        --lr_scheduler_type ${LR_SCHEDULER_TYPE} \
        --num_warmup_steps ${NUM_WARMUP_STEPS} \
        --num_train_epochs ${NUM_TRAIN_EPOCHS} \
        --num_workers ${NUM_WORKERS} \
        --seed ${SEED} \
        --data_path ${DATA_PATH} \
        --images_path ${IMAGES_PATH} \
        --tokenizer_path ${TOKENIZER_PATH} \
        --dtype ${DTYPE} \
        --gradient_clipping ${GRADIENT_CLIPPING} \
        --train_micro_batch_size_per_gpu ${TRAIN_MICRO_BATCH_SIZE_PER_GPU} \
        --zero_stage ${ZERO_STAGE} \
        --device ${DEVICE} \
        --save_dir ${SAVE_DIR} \
        --train_stage ${TRAIN_STAGE} \
        --lora_rank ${LORA_RANK} \
        --steps_per_print ${STEP_PER_PRINT}"
# BUG FIX: --max_seq_len was passed twice (once near --norm_eps and again
# after --tokenizer_path); the duplicate has been removed.

# Append boolean / optional flags to the command based on config toggles.
if [ "$USE_MOE" = true ]; then
    command="$command --use_moe"
fi

if [ "$N_SHARED_EXPERTS" = true ]; then
    command="$command --n_shared_experts"
fi

if [ "$FLASH_ATTN" = true ]; then
    command="$command --flash_attn"
fi

if [ "$SEQ_AUX" = true ]; then
    command="$command --seq_aux"
fi

if [ "$NORM_TOPK_PROB" = true ]; then
    command="$command --norm_topk_prob"
fi

if [ "$GRADIENT_CHECKPOINTING" = true ]; then
    command="$command --gradient_checkpointing"
fi

if [ "$WALL_CLOCK_BREAKDOWN" = true ]; then
    command="$command --wall_clock_breakdown"
fi

if [ -n "${LOAD_CKPT_PATH}" ]; then
    command="$command --load_ckpt_path ${LOAD_CKPT_PATH}"
fi

# BUG FIX: the old test only compared against the literal "None", so the
# empty-string default always appended a bare '--images_lmdb_path' with no
# value. Only pass the flag when a real path was supplied.
if [ -n "${IMAGES_LMDB_PATH}" ] && [ "${IMAGES_LMDB_PATH}" != "None" ]; then
    command="$command --images_lmdb_path ${IMAGES_LMDB_PATH}"
fi

echo "dataset length:"
# Guard: the old bare 'wc -l ${DATA_PATH}' read stdin (and hung) whenever
# DATA_PATH was empty, and word-split paths containing spaces.
if [ -f "${DATA_PATH}" ]; then
    wc -l "${DATA_PATH}"
else
    echo "(data file not found: ${DATA_PATH})"
fi
# Pretty-print the assembled command, one option per line.
echo ${command} | sed "s/--/\n  --/g"

echo "============================================"
mkdir -p "${SAVE_DIR}"
# Run training in the foreground ('cmd & wait' was an indirect way of doing
# the same). ${command} is intentionally unquoted so the string word-splits
# into program + arguments; the log path is quoted.
${command} > "${SAVE_DIR}/training_${TRAIN_STAGE}.log" 2>&1


# TRAIN_STAGE = ["PT", "SFT", "LORA", "Distillation", "DPO", "VLM_PT", "VLM_SFT"]
# Map the training stage onto the numeric model mode expected by the
# inference scripts; unknown stages abort the script.
case ${TRAIN_STAGE} in
    PT|VLM_PT|VideoLM_PT)
        EVAL_MODEL_MODE=0
        ;;
    SFT|VLM_SFT|VideoLM_SFT|LORA)
        EVAL_MODEL_MODE=1
        ;;
    DPO)
        EVAL_MODEL_MODE=2
        ;;
    Distillation)
        EVAL_MODEL_MODE=3
        ;;
    *)
        echo "Unknown TRAIN_STAGE: ${TRAIN_STAGE}"
        exit 1
        ;;
esac
echo "EVAL_MODEL_MODE: $EVAL_MODEL_MODE"

# Run the stage-appropriate inference script on the freshly trained
# checkpoint; output goes to ${SAVE_DIR}/infer.log. Paths are quoted so
# SAVE_DIR / TRAIN_STAGE values containing spaces don't word-split.
if [[ ${TRAIN_STAGE} == "VLM_PT" ||  ${TRAIN_STAGE} == "VLM_SFT" ]]; then
    echo "start python inference_vlm.py"
    python inference_vlm.py \
    --model_mode ${EVAL_MODEL_MODE} \
    --dim ${DIM} \
    --n_layers ${N_LAYERS} \
    --n_kv_heads ${N_KV_HEADS} \
    --max_seq_len ${MAX_SEQ_LEN} \
    --load_ckpt_path "${SAVE_DIR}/${TRAIN_STAGE}.pth" \
    > "${SAVE_DIR}/infer.log" 2>&1
elif [[ ${TRAIN_STAGE} == "VideoLM_PT" ||  ${TRAIN_STAGE} == "VideoLM_SFT" ]]; then
    echo "start python inference_videolm.py"
    # NOTE(review): unlike the other branches this one passes no
    # --model_mode; presumably inference_videolm.py does not accept it —
    # confirm against that script's argparse.
    python inference_videolm.py \
    --dim ${DIM} \
    --n_layers ${N_LAYERS} \
    --n_kv_heads ${N_KV_HEADS} \
    --max_seq_len ${MAX_SEQ_LEN} \
    --load_ckpt_path "${SAVE_DIR}/${TRAIN_STAGE}.pth" \
    > "${SAVE_DIR}/infer.log" 2>&1
else
    echo "start python inference.py"
    python inference.py \
    --model_mode ${EVAL_MODEL_MODE} \
    --dim ${DIM} \
    --n_layers ${N_LAYERS} \
    --n_kv_heads ${N_KV_HEADS} \
    --max_seq_len ${MAX_SEQ_LEN} \
    --load_ckpt_path "${SAVE_DIR}/${TRAIN_STAGE}.pth" \
    > "${SAVE_DIR}/infer.log" 2>&1
fi

# Plot the training loss curve from the log file.
# BUG FIX: the last option previously ended with a stray '\' that continued
# the command onto the following (blank) line.
# NOTE(review): the relative '../tools' path assumes the script is run from
# its own directory, like the inference calls above — confirm.
python ../tools/plot/plot_loss.py \
    --log_file_path "${SAVE_DIR}/training_${TRAIN_STAGE}.log" \
    --save_name "${TRAIN_STAGE}" \
    --save_path "${SAVE_DIR}"