#!/bin/bash
# Launch-environment setup for distributed Llama2 training (Megatron-LM).
# All tunables come from the environment with defaults suitable for one
# 8-GPU node.
set -eo pipefail

GPUS_PER_NODE=${TQ_GPU_NUM:-8}            # GPUs per node
NUM_NODES=${WORLD_SIZE:-1}                # launcher exports WORLD_SIZE as the node count
MASTER_ADDR=${MASTER_ADDR:-"localhost"}
NODE_RANK=${RANK:-0}
MASTER_PORT=${MASTER_PORT:-"12344"}
RECOMPUTE_LAYERS=0


CURRENT_TIME=$(date "+%Y-%m-%d_%H:%M:%S")
TP=${TP:-1}                               # tensor-model-parallel size
PP=${PP:-1}                               # pipeline-model-parallel size
AC=${AC:-none}                            # activation checkpointing: none|full
PR=${PR:-bf16}                            # precision mode
SEQ_LEN=${SEQ_LEN:-4096}
TRAIN_ITERS=${TRAIN_ITERS:-10}
# Use VAR=$(( ... )) rather than (( VAR=... )): the latter returns a non-zero
# status whenever the computed value is 0, which would silently abort the
# script under `set -e` (e.g. DP_SIZE=0 when WORLD_SIZE < TP*PP).
WORLD_SIZE=$((GPUS_PER_NODE * NUM_NODES)) # redefined: total GPU count across all nodes
NUM_MICROBATCHES=16
MICRO_BATCH_SIZE=${MICRO_BATCH_SIZE:-1}
DP_SIZE=$((WORLD_SIZE / (TP * PP)))       # data-parallel size
#GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE * NUM_MICROBATCHES * DP_SIZE))
GLOBAL_BATCH_SIZE=${GLOBAL_BATCH_SIZE:-1024}
DIST_BACKEND=${DIST_BACKEND:-nccl}
WORK_HOME="$PWD"
PATCH_HOME="$PWD"/../..
EXPNAME="tp${TP}_pp${PP}_dp${DP_SIZE}_mbs${MICRO_BATCH_SIZE}_numbs${NUM_MICROBATCHES}_gbs${GLOBAL_BATCH_SIZE}_gpus${WORLD_SIZE}"
DATA_PATH=${DATASET_PATH:-"/mnt/x10000/002266/datasets/llama2/pile-llama_text_document"}
TOKENIZED_MODEL=${TDIR:-"/mnt/x10000/002266/models/llama2-7b-hf/tokenizer.model"}
RDZV_ID=$CURRENT_TIME
OUTPUT_DIR=${OUTPUT_DIR:-"./output"}

# Drop any inherited MLflow endpoint so it cannot leak into this run.
unset MLFLOW_TRACKING_URI
echo "所有参数: \$* = $*"
echo "传递给脚本的参数个数是：$#"
RECOMPUTE_LAYERS=0
echo "AC=$AC"
# AC=full enables activation checkpointing; RECOMPUTE_LAYERS feeds the
# --recompute-num-layers flag later in this script.
case "$AC" in
  full) export RECOMPUTE_LAYERS=1 ;;
esac

# Optional profiler hooks (left disabled).
# export ENABLE_PROFILER=1
# export PROFILER_FREQ=4
# Limit OpenMP threads per rank to avoid CPU oversubscription.
export OMP_NUM_THREADS=4
# MUSA device selection and MCCL (collective comms) tuning for Moore Threads
# GPUs.
# NOTE(review): the semantics of these vendor-specific knobs are not visible
# from this file; the values look cluster-tuned — confirm before changing.
export MUSA_VISIBLE_DEVICES='0,1,2,3,4,5,6,7'
export MUSA_EXECUTION_TIMEOUT=3200000
export ACCELERATOR_BACKEND="musa"
export MCCL_PROTOS=2
export MCCL_CHECK_POINTERS=0
# Presumably required for Megatron sequence parallelism (single device
# connection) — TODO confirm against the framework docs.
export CUDA_DEVICE_MAX_CONNECTIONS=1
export MUSA_BLOCK_SCHEDULE_MODE=1
export MCCL_IB_GID_INDEX=3
export MUSA_PRINT_ENV=1
export MCCL_ALGOS=1
export MUSA_EXECUTE_COUNT=1 
export MCCL_BUFFSIZE=20480000

# Resolve the repository root: this script lives two directory levels below it.
ROOT_DIR=$( dirname -- "$( readlink -f -- "$0"; )"; )
ROOT_DIR=$(realpath "${ROOT_DIR}/../..")
# Quoted to avoid word-splitting/globbing of the path (SC2086).
echo "$ROOT_DIR"

MEGATRON_PATH=${ROOT_DIR}
# Put the patched tree ahead of the vendored Megatron-LM 0.12 checkout.
export PYTHONPATH=${MEGATRON_PATH}:${MEGATRON_PATH}/Megatron/Megatron-LM-0.12:$PYTHONPATH
export LD_LIBRARY_PATH=/usr/local/openmpi/lib:/usr/local/musa/lib:${LD_LIBRARY_PATH}


# One-time extension build, currently disabled.
# if [ ! -d "${MEGATRON_PATH}/build" ]; then
#     cd "${MEGATRON_PATH}"
#     python setup.py build_ext --inplace
#     cd -
# fi


# Per-run log and checkpoint locations.
# Fix: NODE_ADDR was referenced in LOG_FILE but never assigned anywhere in
# this script, producing names like "<exp>.0..log"; default it to the host
# name. Path expansions are quoted so spaces in OUTPUT_DIR cannot break mkdir.
NODE_ADDR=${NODE_ADDR:-$(hostname)}
mkdir -p "${OUTPUT_DIR}/logs/${CURRENT_TIME}"
LOG_FILE="${OUTPUT_DIR}/logs/${CURRENT_TIME}/${EXPNAME}.${NODE_RANK}.${NODE_ADDR}.log"
CHECKPOINT_PATH="${OUTPUT_DIR}/checkpoint/${EXPNAME}"
mkdir -p "$CHECKPOINT_PATH"
TENSORBOARD_PATH=${OUTPUT_DIR}/tf_logs


# torchrun rendezvous / topology flags, assembled incrementally.
# Values are intentionally left unquoted, matching the rest of this script:
# an empty variable then contributes no word at all.
DISTRIBUTED_ARGS=()
DISTRIBUTED_ARGS+=(--nproc_per_node $GPUS_PER_NODE)
DISTRIBUTED_ARGS+=(--nnodes $NUM_NODES)
DISTRIBUTED_ARGS+=(--node_rank $NODE_RANK)
DISTRIBUTED_ARGS+=(--master_addr $MASTER_ADDR)
DISTRIBUTED_ARGS+=(--master_port $MASTER_PORT)

# Model size selector. Fix: previously hard-coded to 7B, unlike every other
# knob in this script; now overridable (MODEL_SIZE=70B) with the same
# env-default convention. Default behavior is unchanged.
MODEL_SIZE=${MODEL_SIZE:-7B}

# Llama2-7B architecture.
MODEL_ARGS=(
    --num-layers 32
    --hidden-size 4096 
    --ffn-hidden-size 11008
    --num-attention-heads 32 
    # --group-query-attention 
    # --num-query-groups 8
    --seq-length ${SEQ_LEN} 
    --max-position-embeddings 4096 
    --norm-epsilon 1e-5 
    --attention-dropout 0.0 
    --hidden-dropout 0.0 
    --disable-bias-linear 
    --position-embedding-type rope 
    # --use-rotary-position-embeddings
    # have used rope as positional encoding
    --no-position-embedding 
    --swiglu 
    --normalization RMSNorm
    --untie-embeddings-and-output-weights
)

# Quoted test: the unquoted form errors out if MODEL_SIZE is ever empty.
if [ "$MODEL_SIZE" = 70B ]; then
    # Llama2-70B architecture.
    MODEL_ARGS=(
    --num-layers 80
    --hidden-size 8192 
    --ffn-hidden-size 28672
    --num-attention-heads 64 
    # Llama2-70B uses grouped-query attention (GQA); the next two flags
    # configure it.
    --group-query-attention 
    --num-query-groups 8
    --seq-length ${SEQ_LEN} 
    --max-position-embeddings 4096 
    --norm-epsilon 1e-5 
    --attention-dropout 0.0 
    --hidden-dropout 0.0 
    --disable-bias-linear 
    --position-embedding-type rope 
    --use-rotary-position-embeddings
    # --no-position-embedding 
    --swiglu 
    --normalization RMSNorm
    --untie-embeddings-and-output-weights
)
fi

# 244140625 samples corresponds to 1T tokens (reference for --train-samples).
# Flags are appended one group per line; values stay unquoted to match the
# rest of the script.
TRAINING_ARGS=(--seed 42)
TRAINING_ARGS+=(--micro-batch-size $MICRO_BATCH_SIZE)
TRAINING_ARGS+=(--global-batch-size $GLOBAL_BATCH_SIZE)
# --train-samples 24414062  (alternative to --train-iters)
TRAINING_ARGS+=(--train-iters ${TRAIN_ITERS})
TRAINING_ARGS+=(--init-method-std 0.008)
TRAINING_ARGS+=(--use-mcore-models)
# --no-gradient-accumulation-fusion
TRAINING_ARGS+=(--no-bias-dropout-fusion)
# --no-bias-swiglu-fusion
TRAINING_ARGS+=(--use-distributed-optimizer)
TRAINING_ARGS+=(--use-flash-attn)
TRAINING_ARGS+=(--sequence-parallel)
TRAINING_ARGS+=(--recompute-granularity full)
TRAINING_ARGS+=(--recompute-method block)
TRAINING_ARGS+=(--recompute-num-layers ${RECOMPUTE_LAYERS})
TRAINING_ARGS+=(--distributed-backend ${DIST_BACKEND})
# --transformer-impl transformer_engine
# --no-rope-fusion

# --transformer-impl local transformer_engine
# AdamW regularization settings.
REGULARIZATION_ARGS=(--weight-decay 0.1 --adam-beta1 0.9 --adam-beta2 0.95 --clip-grad 1.0)

# Warmup expressed in samples (currently unused: the flag below is
# commented out).
WARMUP_STEPS=2000
WARMUP_SAMPLES=$((WARMUP_STEPS * GLOBAL_BATCH_SIZE))

# Cosine decay from 1.5e-5 down to a 1.5e-6 floor.
LEARNING_RATE_ARGS=(
    --lr 1.5e-5
    --lr-decay-style cosine
    # --lr-warmup-samples ${WARMUP_SAMPLES} 
    --min-lr 1.5e-6
    --initial-loss-scale 65536
    --min-loss-scale 1.0
)

# Tensor / pipeline parallel topology.
MODEL_PARALLEL_ARGS=(
    --tensor-model-parallel-size $TP
    --pipeline-model-parallel-size $PP
)

# Default to an empty array so the later ${MIXED_PRECISION_ARGS[@]} expansion
# is always well-defined even when PR is not bf16.
MIXED_PRECISION_ARGS=()
# Fix: $PR is quoted — the unquoted `[ $PR = bf16 ]` raises
# "unary operator expected" whenever PR expands to the empty string.
if [ "$PR" = bf16 ]; then
    MIXED_PRECISION_ARGS=(
        --bf16
        --attention-softmax-in-fp32
        --no-masked-softmax-fusion
        --accumulate-allreduce-grads-in-fp32
    )
fi

# Dataset and tokenizer configuration.
DATA_ARGS=(
    --data-path $DATA_PATH
    --tokenizer-type Llama2Tokenizer
    --tokenizer-model ${TOKENIZED_MODEL}
    --split 1
)

# Transformer implementation: local kernels by default; Transformer Engine
# with FP8 when PRECISION=fp8.
# NOTE(review): this branch keys off $PRECISION while the bf16 branch keys
# off $PR — confirm the two knobs are intentionally distinct.
TRANSFORMER_ENGINE_ARGS=(--transformer-impl local)
if [ "$PRECISION" = "fp8" ]; then
    TRANSFORMER_ENGINE_ARGS=(
        --transformer-impl transformer_engine
        # --transformer-impl local
        --fp8-format hybrid
        --fp8-param-gather
    )
fi

# Alternative GPT-style data args (vocab/merge files), kept for reference:
# DATA_ARGS=(
#     --data-path $DATA_PATH
#     --vocab-file $VOCAB_FILE
#     --merge-file $MERGE_FILE
#     --split 949,50,1
# )



# Logging / checkpoint cadence. --eval-iters 0 disables validation passes.
EVAL_AND_LOGGING_ARGS=()
EVAL_AND_LOGGING_ARGS+=(--log-interval 1)
EVAL_AND_LOGGING_ARGS+=(--save-interval 100)
EVAL_AND_LOGGING_ARGS+=(--log-throughput)
EVAL_AND_LOGGING_ARGS+=(--eval-interval 100)
EVAL_AND_LOGGING_ARGS+=(--save $CHECKPOINT_PATH)
EVAL_AND_LOGGING_ARGS+=(--load $CHECKPOINT_PATH)
EVAL_AND_LOGGING_ARGS+=(--eval-iters 0)
EVAL_AND_LOGGING_ARGS+=(--tensorboard-dir $TENSORBOARD_PATH)

# Optional Weights & Biases reporting, currently disabled:
# if [ -n "${WANDB_API_KEY}" ]; then
#     EVAL_AND_LOGGING_ARGS+=(
#         --wandb-project ${WANDB_PROJECT:-"Mixtral-Finetuning"}
#         --wandb-exp-name ${WANDB_NAME:-"Mixtral_8x7B"} 
#     )
# fi

# Launch training under torchrun; stdout+stderr are duplicated to LOG_FILE.
# Array expansions are deliberately unquoted, matching how the arrays were
# built: an element that expands to the empty string contributes no word.
# With `set -o pipefail` (L2), a failing torchrun still fails this pipeline
# despite the trailing tee.
torchrun ${DISTRIBUTED_ARGS[@]} distributed_c10d_x10000_dp.py \
        ${MODEL_ARGS[@]} \
        ${TRAINING_ARGS[@]} \
        ${REGULARIZATION_ARGS[@]} \
        ${LEARNING_RATE_ARGS[@]} \
        ${MODEL_PARALLEL_ARGS[@]} \
        ${MIXED_PRECISION_ARGS[@]} \
        ${DATA_ARGS[@]} \
        ${EVAL_AND_LOGGING_ARGS[@]} \
        ${TRANSFORMER_ENGINE_ARGS[@]} 2>&1 | tee ${LOG_FILE}

# Relax error handling for anything that sources or continues after this
# script.
set +e