#!/bin/bash
# Launch script for DeepSeek-V2 pretraining with Megatron-LM on MUSA GPUs.
# Every knob below is overridable from the environment.
set -eo pipefail

TP=${TP:-1}                          # tensor-model-parallel size
PP=${PP:-2}                          # pipeline-model-parallel size
EP=${EP:-4}                          # expert-model-parallel size
DIST_BACKEND=${DIST_BACKEND:-nccl}
PR=${PR:-bf16}                       # precision mode; bf16 enables MIXED_PRECISION_ARGS below
FP8=${FP8:-true}                     # "false" disables the TE fp8 recipe and adds recompute
# BUG FIX: was `TIMER_PRINT={TIMER_PRINT:-false}` (missing `$`), which stored the
# literal string "{TIMER_PRINT:-false}" and made the later
# `[ $TIMER_PRINT = true ]` check impossible to enable.
TIMER_PRINT=${TIMER_PRINT:-false}
TRAIN_ITERS=${TRAIN_ITERS:-100}
CHECKPOINT_LOAD_PATH=${CHECKPOINT_LOAD_PATH:-"/mnt/x10000/002266/models/DeepSeek-V2-Lite/checkpoint-100"}

echo "Using MUSA backend"

# Batch geometry and data locations (env-overridable defaults).
: "${MICRO_BATCH_SIZE:=2}"
: "${GLOBAL_BATCH_SIZE:=1024}"
#GLOBAL_BATCH_SIZE=4096


: "${SEQ_LEN:=4096}"
DATA_PATH=${DATASET_PATH:-"/mnt/x10000/002266/datasets/deepseekv2_datasets/mmap_deepseekv2_datasets_text_document"}
TOKENIZED_MODEL=${TDIR:-"/mnt/x10000/002266/models/DeepSeek-V2-Lite"}

OUTPUT_DIR=${OUTPUT_DIR:-"./output"}


# Drop any inherited MLflow endpoint so this run never reports to a stale one.
unset MLFLOW_TRACKING_URI

###########################
###### change for multinode config
###########################
NUM_NODES=${WORLD_SIZE:-1}                # launcher exports WORLD_SIZE as the node count
CURRENT_TIME=$(date "+%Y-%m-%d_%H:%M")
MASTER_ADDR=${MASTER_ADDR:-"localhost"}
MASTER_PORT=${MASTER_PORT:-"7018"}
GPUS_PER_NODE=${TQ_GPU_NUM:-8}
RECOMPUTE_LAYERS=0
WORLD_SIZE=$((GPUS_PER_NODE * NUM_NODES)) # redefined: now total GPU count
DP_SIZE=$((WORLD_SIZE / (PP * TP)))
NODE_RANK=${RANK:-0}
SAVE_INTERVAL=${SAVE_INTERVAL:-100}
EXIT_INTERVAL=${EXIT_INTERVAL:-220}
# BUG FIX: NUM_MICROBATCHES was interpolated into EXPNAME below but never set,
# leaving an empty "numbs" field in the experiment name. Derive it the standard
# Megatron way: global batch split across data-parallel ranks and micro-batches.
NUM_MICROBATCHES=${NUM_MICROBATCHES:-$((GLOBAL_BATCH_SIZE / (MICRO_BATCH_SIZE * DP_SIZE)))}
###########################
EXPNAME="tp${TP}_pp${PP}_dp${DP_SIZE}_mbs${MICRO_BATCH_SIZE}_numbs${NUM_MICROBATCHES}_gbs${GLOBAL_BATCH_SIZE}_gpus${WORLD_SIZE}"

###########################
###### environment variables
###########################
# export ENABLE_D2H_IN_PERMUTATION=1  # not present in the Moore Threads reference script
export NO_LOSS_REDUCE=1
export USE_RECOMPUTE_VARIANCE=1
# export USE_MUSA_MOE=1

# MUSA runtime / device settings (very large timeouts to survive long kernels).
export LOGLEVEL="INFO"
export MUSA_EXECUTION_TIMEOUT=20000000
export MUSA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
export CUDA_DEVICE_MAX_CONNECTIONS=1
export MUSA_KERNEL_TIMEOUT=3200000
export ACCELERATOR_BACKEND="musa"
# MCCL tuning — presumably the MUSA analog of the NCCL_* knobs; verify against
# the Moore Threads MCCL documentation.
export MCCL_PROTOS=2
export MCCL_CHECK_POINTERS=0
export OMP_NUM_THREADS=4
export MCCL_ALGOS=1

# Larger collective buffers plus InfiniBand/RoCE transport settings.
export MCCL_BUFFSIZE=20971520
export MUSA_BLOCK_SCHEDULE_MODE=1
export MCCL_IB_GID_INDEX=3
export MCCL_NET_SHARED_BUFFERS=0
export MCCL_IB_TC=122
export MCCL_IB_QPS_PER_CONNECTION=16

###########################
###### commonly used args
###########################
# MICRO_BS=1
# MICRO_CNT=64
# PP_SIZE=1
# TP_SIZE=1
# EP_SIZE=8
# HIDDEN_SIZE=2048
# NUM_LAYERS=48 # 61 in fact

# Latest 021-32B model hyperparameters.
HIDDEN_SIZE=2048
NUM_ATTN_HEADS=32
# 021-32B uses 40 layers
NUM_LAYERS=40
INTERMEDIATE_SIZE=12288
MOE_INTERMEDIATE_SIZE=1536
MAX_POSITION_EMBEDDINGS=4096
# MLA (multi-latent attention) low-rank projection sizes and head dims.
Q_LORA_RANK=1536
KV_LORA_RANK=512
QK_NOPE_HEAD_DIM=128
QK_ROPE_HEAD_DIM=64
V_HEAD_DIM=128
# RoPE base and scaling.
ROPE_THETA=10000
SCALE_FACTOR=1
# MoE routing configuration. NOTE: MOE_LAYER_FREQ is overwritten later in this
# file with an explicit per-layer dense/MoE pattern before it is used.
NUM_EXPERTS=80
ROUTER_TOPK=7
NUM_SHARED_EXPERTS=1
MOE_LAYER_FREQ=1
MOE_FIRST_K_DENSE_REPLACE=1
RMS_NORM_EPS=1e-6

# Split NUM_LAYERS across PP pipeline stages: every stage gets the floor
# quotient, and any remainder layers are folded into the final stage.
if [ "$PP" -eq 1 ]; then
    LAST_STAGE_ARG=""
else
    per_stage=$((NUM_LAYERS / PP))
    leftover=$((NUM_LAYERS % PP))
    LAST_STAGE=$((per_stage + leftover))

    # Tell Megatron the (possibly larger) size of the final pipeline stage.
    LAST_STAGE_ARG="--decoder-last-pipeline-num-layers ${LAST_STAGE}"
fi

###########################
###### exp name and log dir
###########################


echo "MASTER_ADDR: ${MASTER_ADDR}"
# NOTE(review): NODE_ADDR is never assigned in this script — presumably exported
# by the cluster launcher; otherwise it expands empty in the log-file name.
echo "NODE_ADDR: ${NODE_ADDR}"
echo "NODE_RANK: ${NODE_RANK}"

# One log directory per launch timestamp; one log file per node rank.
mkdir -p ${OUTPUT_DIR}/logs/${CURRENT_TIME}
LOG_FILE="${OUTPUT_DIR}/logs/${CURRENT_TIME}/${EXPNAME}.${NODE_RANK}.${NODE_ADDR}.log"

# Checkpoints are keyed by the experiment name; tensorboard logs are shared.
CHECKPOINT_PATH=${CHECKPOINT_PATH:-"${OUTPUT_DIR}/checkpoint/${EXPNAME}"}
mkdir -p ${CHECKPOINT_PATH}
TENSORBOARD_PATH="${OUTPUT_DIR}/tf_logs"

###########################
###### training args
###########################
# torchrun topology/rendezvous flags.
DISTRIBUTED_ARGS=(
    --nproc_per_node $GPUS_PER_NODE 
    --nnodes $NUM_NODES 
    --node_rank $NODE_RANK 
    --master_addr $MASTER_ADDR 
    --master_port $MASTER_PORT
    #--log_dir $WORK_HOME/output_log/$RDZV_ID/$EXPNAME
    #--redirects 1
)

# Core transformer shape/architecture flags.
MODEL_ARGS=(
    --num-layers $NUM_LAYERS  # 60 
    --hidden-size ${HIDDEN_SIZE}
    --num-attention-heads $NUM_ATTN_HEADS
    --seq-length ${SEQ_LEN} 
    --max-position-embeddings $MAX_POSITION_EMBEDDINGS
    --norm-epsilon $RMS_NORM_EPS
    --attention-dropout 0.0 
    --hidden-dropout 0.0 
    --disable-bias-linear 
    --vocab-size 102400 
    --ffn-hidden-size $INTERMEDIATE_SIZE
    --position-embedding-type rope 
    # --use-rotary-position-embeddings
    #--no-position-embedding 
    # aligned with reference
    --rotary-base ${ROPE_THETA} 
    --rotary-scaling-factor ${SCALE_FACTOR} 
    # --no-position-embedding 
    --swiglu 
    --normalization RMSNorm
    --untie-embeddings-and-output-weights
    # aligned with reference
    # --cross-entropy-loss-fusion
)

# Multi-token prediction (currently disabled).
MULTI_TOKEN_PREDICTION_ARGS=(
    # --use-multi-token-prediction
    # --mtp-coeff $MTP_COEFF
    # --mtp-depth $MTP_DEPTH
)

# Newly added: profiler hooks (currently disabled).
TRACE_ARGS=(
    # --use-pytorch-profiler 
    # --profile 
    # --profile-ranks 0 
)

# Newly added: manual Python garbage collection every 500 iterations.
GC_ARGS=(
    --manual-gc
    --manual-gc-interval 500
)

# 24414062 1T
# Optimizer schedule, batching, recompute and fusion switches.
TRAINING_ARGS=(
    --lr-warmup-iters 0
    --seed 42 
    --micro-batch-size $MICRO_BATCH_SIZE 
    --global-batch-size $GLOBAL_BATCH_SIZE  
    # --train-samples 24414062 
    --train-iters ${TRAIN_ITERS}
    --init-method-std  0.006 # 0.02 in HF config, but 0.006 in the paper 
    --use-mcore-models 
    # aligned with reference
    # --no-gradient-accumulation-fusion
    --no-bias-dropout-fusion
    # --no-bias-swiglu-fusion
    --use-distributed-optimizer 
    --use-flash-attn 
    # aligned with reference
    # --sequence-parallel 
    --recompute-granularity full 
    --recompute-method block 
    --recompute-num-layers ${RECOMPUTE_LAYERS}
    --distributed-backend ${DIST_BACKEND}
    --multi-latent-attention
    --qk-layernorm

    # BUG FIX: was quoted ("${LAST_STAGE_ARG}"), which put a single empty array
    # element in when PP=1 and a single two-word element otherwise; both only
    # worked because the array is expanded unquoted at the call site. Left
    # unquoted here, the value word-splits into proper elements and disappears
    # entirely when empty.
    ${LAST_STAGE_ARG}
    
    --mlp-recompute
    --mlp-rms-recompute 
    --recompute-variance
)

# Multi-latent attention dimensions (values defined in the hyperparameter block).
MLA_ARGS=(
    --q-lora-rank ${Q_LORA_RANK} 
    --kv-lora-rank ${KV_LORA_RANK} 
    --qk-head-dim ${QK_NOPE_HEAD_DIM} 
    --qk-pos-emb-head-dim ${QK_ROPE_HEAD_DIM} 
    --v-head-dim ${V_HEAD_DIM} 
)

# Adam hyperparameters and gradient clipping.
REGULARIZATION_ARGS=(
    --weight-decay 0.1 
    --adam-beta1 0.9 
    --adam-beta2 0.95 
    --clip-grad 1.0 
)

# NOTE(review): WARMUP_SAMPLES is computed but unused — --lr-warmup-samples is
# commented out below and TRAINING_ARGS passes --lr-warmup-iters 0.
WARMUP_STEPS=2000
WARMUP_SAMPLES=$((WARMUP_STEPS * GLOBAL_BATCH_SIZE))

LEARNING_RATE_ARGS=(
    --lr 3.2e-4 
    --lr-decay-style cosine 
    # --lr-warmup-samples ${WARMUP_SAMPLES} 
    --min-lr 3.2e-05 
    --initial-loss-scale 65536 
    --min-loss-scale 1.0 
)

# Tensor/pipeline parallel sizes (expert parallel is set in MOE_ARGS).
MODEL_PARALLEL_ARGS=(
	--tensor-model-parallel-size $TP  
	--pipeline-model-parallel-size $PP 
    
    # aligned with the reproduction setup
    --tp-only-amax-red # comment this flag out to align with the 021 run
    # --use-tp-pp-dp-mapping
)

# bf16 mixed-precision flags; any other PR value passes no precision args.
# BUG FIX: $PR was unquoted in the test, which makes `[` error out when PR is
# empty; also added an explicit else branch so MIXED_PRECISION_ARGS is always a
# defined (possibly empty) array rather than left unset.
if [ "$PR" = bf16 ]; then
    MIXED_PRECISION_ARGS=(
        --bf16 
        --attention-softmax-in-fp32 
        --no-masked-softmax-fusion 
        --accumulate-allreduce-grads-in-fp32
        # aligned with reference
        --grad-reduce-in-bf16
    )
else
    MIXED_PRECISION_ARGS=()
fi

# Dataset and tokenizer. NullTokenizer pairs with the explicit --vocab-size
# passed in MODEL_ARGS; the HF tokenizer model is deliberately not loaded.
DATA_ARGS=(
    --data-path $DATA_PATH
    --tokenizer-type NullTokenizer
    # --tokenizer-model ${TOKENIZED_MODEL}
    --split 1
)

# (superseded by the FP8 switch below)
# TRANSFORMER_ENGINE_ARGS=(
#     --transformer-impl transformer_engine
#     # --transformer-impl local
#     # --fp8-format hybrid
#     # --fp8-param-gather
# )
# BUG FIX: $FP8 was unquoted, so `[ $FP8 = false ]` errors when FP8 is empty.
if [ "$FP8" = false ]; then
    TRANSFORMER_ENGINE_ARGS=(
        --transformer-impl transformer_engine
        # # --transformer-impl local
        # --fp8-format hybrid
        # --fp8-param-gather
        # With fp8 off, device memory fills up easily — add extra recompute.
        --attn-recompute
        --mla-rms-recompute #@huang
    )
else
    TRANSFORMER_ENGINE_ARGS=(
        --transformer-impl transformer_engine
        # # --transformer-impl local
        --fp8-format hybrid
        --fp8-param-gather
    )
fi



# Read --num-layers back out of MODEL_ARGS so the MoE layer pattern and the
# experiment name always agree with what is actually passed to the trainer.
NUM_LAYERS=$(echo "${MODEL_ARGS[@]}" | grep -oP '(?<=--num-layers )\d+')
NUM_LAYERS_MINUS_ONE=$((NUM_LAYERS - 1))
# Pattern: first layer dense ([0]), all remaining layers MoE ([1]).
MOE_LAYER_FREQ="([0]*1+[1]*${NUM_LAYERS_MINUS_ONE})*1"

EXPNAME="${EXPNAME}_recompute_layers_${RECOMPUTE_LAYERS}"
EXPNAME="${EXPNAME}_NUM_LAYERS_${NUM_LAYERS}"
EXPNAME="${EXPNAME}_HIDDEN_SIZE_${HIDDEN_SIZE}"

# Tag the experiment name when notable flags are present; whole-word match
# against the space-joined argument list replaces the original scan loops.
if [[ " ${TRANSFORMER_ENGINE_ARGS[*]} " == *" --fp8-format "* ]]; then
    EXPNAME="${EXPNAME}_--fp8"
fi

if [[ " ${TRAINING_ARGS[*]} " == *" --recompute-variance "* ]]; then
    EXPNAME="${EXPNAME}_--recompute-variance"
fi


# Checkpointing, evaluation and logging cadence.
EVAL_AND_LOGGING_ARGS=(
    --log-interval 1
    --log-throughput
    --log-timers-to-tensorboard
    --save-interval $SAVE_INTERVAL
    --eval-interval 1
    --save $CHECKPOINT_PATH 
    --load $CHECKPOINT_LOAD_PATH 
    --eval-iters 0
    --tensorboard-dir $TENSORBOARD_PATH
    --ckpt-format torch
    --exit-interval $EXIT_INTERVAL
)
# Optional Megatron timer dump. BUG FIX: $TIMER_PRINT was unquoted (errors when
# empty), and the "off" branch assigned an empty string instead of an empty
# array even though the variable is expanded as ${MEGATRON_LOGGING_ARGS[@]}.
if [ "$TIMER_PRINT" = true ]; then
    MEGATRON_LOGGING_ARGS=(
        --timing-log-level 0
        --timing-log-option all
    )
else
    MEGATRON_LOGGING_ARGS=()
fi




# Mixture-of-experts routing/dispatch configuration.
MOE_ARGS=(
    --num-experts $NUM_EXPERTS
    --expert-model-parallel-size $EP
    --moe-token-dispatcher-type alltoall
    # aligned with reference
    # --moe-router-num-groups $EP_SIZE
    # --moe-router-group-topk 1
    --moe-router-load-balancing-type aux_loss

    # aligned with reference
    # --moe-router-load-balancing-type seq_aux_loss
    --moe-router-topk $ROUTER_TOPK
    # --moe-router-pre-softmax #deepseek use pre-softmax
    --moe-router-score-function softmax
    # --moe-router-norm-topk-prob
    --moe-router-topk-scaling-factor 2.643 # pre-softmax need scaling
    --moe-aux-loss-coeff 0.001
    # aligned with reference
    # --moe-expert-capacity-factor 1
    # --moe-device-level-capacity
    # --moe-device-level-aux-loss-coeff 5e-2 
    # --moe-comm-aux-loss-coeff 2e-2
    --moe-ffn-hidden-size $MOE_INTERMEDIATE_SIZE  # from 1536 down to 768
    --moe-shared-expert-intermediate-size $((${MOE_INTERMEDIATE_SIZE} * ${NUM_SHARED_EXPERTS} ))
    --moe-layer-freq $MOE_LAYER_FREQ
    --moe-grouped-gemm
    --moe-permute-fusion
    
    
    # Overrides set to reproduce the moer-0630 results.
    --moe-router-norm-topk-prob # comment out when aligning
    # NOTE(review): duplicates --moe-router-load-balancing-type above —
    # presumably the later value wins (argparse last-wins); verify.
    --moe-router-load-balancing-type seq_aux_loss # should be aux_loss when aligning
    --moe-expert-capacity-factor 4.0 # comment out when aligning
)


    
# --moe-z-loss-coeff 1e-3
# --moe-expert-capacity-factor 4.0 
# --moe-pad-expert-input-to-capacity
# if [ -n "${WANDB_API_KEY}" ]; then
#     EVAL_AND_LOGGING_ARGS+=(
#         --wandb-project ${WANDB_PROJECT:-"Mixtral-Finetuning"}
#         --wandb-exp-name ${WANDB_NAME:-"Mixtral_8x7B"} 
#     )
# fi

###########################
###### running scripts
###########################


# Repository root: two levels up from this script's resolved location.
ROOT_DIR=$( dirname -- "$( readlink -f -- "$0"; )"; )
ROOT_DIR=$(realpath "${ROOT_DIR}/../..") 
echo $ROOT_DIR

# Build Megatron's C extensions in place before launching.
# NOTE(review): this relative cd assumes the script is invoked from
# zj_examples/deepseek_v2; "$ROOT_DIR/Megatron/Megatron-LM-0.12" would be safer.
cd ../../Megatron/Megatron-LM-0.12
python setup.py build_ext --inplace
cd $ROOT_DIR/zj_examples/deepseek_v2


cat "${ROOT_DIR}/version.txt"


MEGATRON_PATH=${ROOT_DIR}
export PYTHONPATH=${MEGATRON_PATH}:${MEGATRON_PATH}/Megatron/Megatron-LM-0.12:$PYTHONPATH
export LD_LIBRARY_PATH=/usr/local/openmpi/lib:/usr/local/musa/lib:${LD_LIBRARY_PATH}

# Launch training; with `set -o pipefail` at the top of the file, a torchrun
# failure is not masked by the tee stage.
torchrun ${DISTRIBUTED_ARGS[@]} pretrain_deepseekv2.py \
        ${MODEL_ARGS[@]} \
        ${TRAINING_ARGS[@]} \
        ${REGULARIZATION_ARGS[@]} \
        ${LEARNING_RATE_ARGS[@]} \
        ${MODEL_PARALLEL_ARGS[@]} \
        ${MIXED_PRECISION_ARGS[@]} \
        ${DATA_ARGS[@]} \
        ${MOE_ARGS[@]} \
        ${MLA_ARGS[@]} \
        ${TRACE_ARGS[@]} \
        ${GC_ARGS[@]} \
        ${EVAL_AND_LOGGING_ARGS[@]} \
        ${MEGATRON_LOGGING_ARGS[@]} \
        ${MULTI_TOKEN_PREDICTION_ARGS[@]} \
        ${TRANSFORMER_ENGINE_ARGS[@]} 2>&1 | tee ${LOG_FILE}
# NOTE(review): there is no matching `set -x` earlier in the file — this is a no-op.
set +x