#!/usr/bin/env bash
# Launch configuration for the "345M" parameter model training run.
set -euo pipefail

# GPU topology on each node.
NGPUS_PER_NODE=4
STAGES_PER_NODE=2
# NOTE(review): computed but not referenced in the launch command below —
# confirm whether train_mistral.py is expected to read it.
GPUS_PER_STAGE=$(( NGPUS_PER_NODE / STAGES_PER_NODE ))

# Change for multinode config: rendezvous endpoint and this node's rank.
MASTER_ADDR=10.90.1.237
MASTER_PORT=6003
NNODES=1
NODE_RANK=0

# Model-parallel layout.
TENSOR_PARALLEL_SIZE=1
# NOTE(review): computed but unused below; the launch command hardcodes
# --pipeline-model-parallel-size instead. Confirm which is authoritative.
PIPELINE_SIZE=$(( NNODES * STAGES_PER_NODE ))
# WORLD_SIZE=$(($STAGES_PER_NODE*$NNODES))

# Kept as a flat string: the launch site word-splits it deliberately into
# separate arguments for torch.distributed.launch.
DISTRIBUTED_ARGS="--nproc_per_node $NGPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"

# Constraint (virtual/interleaved pipeline schedule): the number of
# microbatches must be divisible by pipeline-model-parallel-size when the
# interleaved schedule is used.
#
# NOTE(review): --pipeline-model-parallel-size is hardcoded to 4, but the
# PIPELINE_SIZE variable above computes NNODES * STAGES_PER_NODE = 2, and
# GPUS_PER_STAGE is never used here. Confirm whether 4 (= NGPUS_PER_NODE
# with TP=1 on a single node) is intended, or whether $PIPELINE_SIZE should
# be passed instead.
#
# NOTE(review): torch.distributed.launch is deprecated in recent PyTorch in
# favor of torchrun — presumably kept for compatibility; verify before
# upgrading PyTorch.
#
# $DISTRIBUTED_ARGS is intentionally unquoted so it word-splits into
# separate launcher options (shellcheck SC2086 is a false positive here).
python -m torch.distributed.launch $DISTRIBUTED_ARGS \
       train_mistral.py \
       --tensor-model-parallel-size $TENSOR_PARALLEL_SIZE \
       --pipeline-model-parallel-size 4 \
       --virtual-pipeline-model-parallel-size 1 \
       --epoch 1 \
       --nstages_per_node $STAGES_PER_NODE \
       --micro-batch-size  2 \
       --micro-batch 8 \
       --distributed-backend nccl \
       --tensor-length 128