#!/bin/bash
# Launches distributed training of the "345M" parameter model
# (train_llama2.py) across NNODES nodes via torch.distributed.launch.
set -euo pipefail

# Topology: 8 GPUs per node, split into 2 pipeline stages per node.
NGPUS_PER_NODE=8
STAGES_PER_NODE=2
# Computed for reference; not currently passed to the trainer — TODO confirm.
GPUS_PER_STAGE=$((NGPUS_PER_NODE / STAGES_PER_NODE))

# Change for multinode config.
MASTER_ADDR=172.16.1.83
MASTER_PORT=6002
NNODES=2
NODE_RANK=0  # set to this node's rank (0..NNODES-1) on each machine
# Total pipeline-parallel size; computed for reference, not passed below.
PIPELINE_SIZE=$((NNODES * STAGES_PER_NODE))

# NOTE(review): 419430400 bytes is ~400 MB, but the original comment said
# "set buffer size to 4 MB" (4 MB = 4194304). Value kept unchanged — confirm
# which was intended.
export NCCL_BUFFSIZE=419430400
# Increase the number of rings; may improve bandwidth utilization.
export NCCL_MIN_NRINGS=2

# Build launcher arguments as an array so each flag/value stays one word
# and no unquoted word-splitting is needed.
DISTRIBUTED_ARGS=(
  --nproc_per_node "$STAGES_PER_NODE"
  --nnodes "$NNODES"
  --node_rank "$NODE_RANK"
  --master_addr "$MASTER_ADDR"
  --master_port "$MASTER_PORT"
)

# NOTE(review): torch.distributed.launch is deprecated in recent PyTorch;
# consider migrating to `torchrun` (check --use_env semantics before switching).
python -m torch.distributed.launch "${DISTRIBUTED_ARGS[@]}" \
       train_llama2.py \
       --distributed-backend nccl