#!/bin/bash
# Launches pipeline-parallel training for the "345M" parameter model
# via torch.distributed.launch, one launched process per pipeline stage.
#
# Adjust MASTER_ADDR/MASTER_PORT/NNODES/NODE_RANK for multinode runs.
set -euo pipefail

# --- Topology -----------------------------------------------------------
NGPUS_PER_NODE=8          # physical GPUs available on each node
STAGES_PER_NODE=1         # pipeline stages hosted on each node

# Guard the division below against a zero-stage misconfiguration.
(( STAGES_PER_NODE > 0 )) || { echo "STAGES_PER_NODE must be > 0" >&2; exit 1; }
GPUS_PER_STAGE=$(( NGPUS_PER_NODE / STAGES_PER_NODE ))

# --- Rendezvous (change for multinode config) ---------------------------
MASTER_ADDR=10.90.1.237
MASTER_PORT=6001
NNODES=1
NODE_RANK=0

# Total pipeline depth across the whole job.
PIPELINE_SIZE=$(( NNODES * STAGES_PER_NODE ))

# Build launcher arguments as an array so every value survives word
# splitting intact and can be expanded safely with quotes.
DISTRIBUTED_ARGS=(
  --nproc_per_node "$STAGES_PER_NODE"
  --nnodes "$NNODES"
  --node_rank "$NODE_RANK"
  --master_addr "$MASTER_ADDR"
  --master_port "$MASTER_PORT"
)

# NOTE(review): torch.distributed.launch is deprecated in recent PyTorch
# releases — consider migrating to `torchrun` when the installed version
# allows. Kept as-is here to preserve current behavior.
python -m torch.distributed.launch "${DISTRIBUTED_ARGS[@]}" \
       train_mistral.py \
       --tensor-model-parallel-size 1 \
       --pipeline-model-parallel-size "$PIPELINE_SIZE" \
       --epoch 1 \
       --gpus_per_stage "$GPUS_PER_STAGE" \
       --nstages_per_node "$STAGES_PER_NODE" \
       --micro-batch-size 8 \
       --micro-batch 8 \
       --distributed-backend nccl \
       --tensor-length 128