#!/bin/bash
#
# Launches distributed training of the "345M"-parameter LLaMA-2 model
# (train_llama2_overlap_vpp.py) via torch.distributed.launch.
#
# All topology and model-size settings are read from environment
# variables with defaults defined below; export any of them before
# running this script to override.

# Fail fast: abort on command errors, unset variables, and pipeline failures.
set -euo pipefail

# --- Cluster topology (all overridable via environment; defaults below) ---
NGPUS_PER_NODE=${NGPUS_PER_NODE:-8}
STAGES_PER_NODE=${STAGES_PER_NODE:-2}
MASTER_ADDR=${MASTER_ADDR:-10.90.1.237}
MASTER_PORT=${MASTER_PORT:-6020}
NNODES=${NNODES:-1}
NODE_RANK=${NODE_RANK:-0}
# Integer division — NGPUS_PER_NODE is expected to be a multiple of
# STAGES_PER_NODE (8/2 = 4 GPUs per stage by default).
GPUS_PER_STAGE=$((NGPUS_PER_NODE / STAGES_PER_NODE))

# --- Parallelism sizes (read from environment, fall back to defaults) ---
TENSOR_PARALLEL_SIZE=${TENSOR_PARALLEL_SIZE:-1}
PIPELINE_PARALLEL_SIZE=${PIPELINE_PARALLEL_SIZE:-2}
VIRTUAL_PIPELINE_MODEL_PARALLEL_SIZE=${VIRTUAL_PIPELINE_MODEL_PARALLEL_SIZE:-1}

# --- Microbatch configuration ---
MICRO_BATCH_SIZE=${MICRO_BATCH_SIZE:-2}
MICRO_BATCH=${MICRO_BATCH:-8}

# --- Model dimensions ---
TENSOR_LENGTH=${TENSOR_LENGTH:-256}   # sequence length
HIDDEN_SIZE=${HIDDEN_SIZE:-4096}
NUM_HIDDEN_LAYERS=${NUM_HIDDEN_LAYERS:-16}
NUM_ATTENTION_HEADS=${NUM_ATTENTION_HEADS:-32}
VOCAB_SIZE=${VOCAB_SIZE:-32000}


# Launcher arguments, kept as an array so each element survives intact
# without relying on unquoted word-splitting (ShellCheck SC2086).
DISTRIBUTED_ARGS=(
  --nproc_per_node "$NGPUS_PER_NODE"
  --nnodes "$NNODES"
  --node_rank "$NODE_RANK"
  --master_addr "$MASTER_ADDR"
  --master_port "$MASTER_PORT"
)

# With the interleaved (virtual pipeline) schedule, the number of
# microbatches must be divisible by pipeline-model-parallel-size.
# NOTE(review): torch.distributed.launch is deprecated upstream in favor
# of torchrun; kept as-is here to avoid changing runtime behavior.
python -m torch.distributed.launch "${DISTRIBUTED_ARGS[@]}" \
       train_llama2_overlap_vpp.py \
       --virtual-pipeline-model-parallel-size "$VIRTUAL_PIPELINE_MODEL_PARALLEL_SIZE" \
       --epoch 1 \
       --nstages_per_node "$STAGES_PER_NODE" \
       --micro-batch-size "$MICRO_BATCH_SIZE" \
       --micro-batch "$MICRO_BATCH" \
       --distributed-backend nccl \
       --tensor-length "$TENSOR_LENGTH" \
       --hidden-size "$HIDDEN_SIZE" \
       --num-hidden-layers "$NUM_HIDDEN_LAYERS" \
       --num-attention-heads "$NUM_ATTENTION_HEADS" \
       --vocab-size "$VOCAB_SIZE"

# Disabled flags (parallel sizes are currently taken from the env vars above):
#   --tensor-model-parallel-size $TENSOR_PARALLEL_SIZE
#   --pipeline-model-parallel-size $PIPELINE_PARALLEL_SIZE