#!/bin/bash
#
# Launch auto-parallel Llama-2 training on a single node.
# Edit the cluster variables below before running.
set -euo pipefail

NGPUS_PER_NODE=8
MASTER_ADDR=10.200.44.6
MASTER_PORT=6002
NNODES=1
NODE_RANK=0
TENSOR_PARALLEL_SIZE=1
STAGES_PER_NODE=2

# torchrun-style launcher arguments. Currently unused (see TODO below);
# kept for an eventual switch from plain `python3` to `torchrun`.
# shellcheck disable=SC2034
DISTRIBUTED_ARGS=(
    --nproc_per_node "$NGPUS_PER_NODE"
    --nnodes "$NNODES"
    --node_rank "$NODE_RANK"
    --master_addr "$MASTER_ADDR"
    --master_port "$MASTER_PORT"
)

# Arguments consumed by the auto-parallel search inside the training script.
SEARCH_ARGS=(
    --auto-parallel
    --nnodes "$NNODES"
    --nproc-per-node "$NGPUS_PER_NODE"
    --master-addr "$MASTER_ADDR"
    --master-port "$MASTER_PORT"
    --node-rank "$NODE_RANK"
)

# TODO: decide whether to launch with plain python or with torchrun.
# NOTE: with the interleaved (virtual pipeline) schedule, the number of
# microbatches must be divisible by pipeline-model-parallel-size.
python3 train_llama2_autoParallel.py \
    "${SEARCH_ARGS[@]}" \
    --epoch 1 \
    --nstages_per_node "$STAGES_PER_NODE" \
    --distributed-backend nccl \
    --tensor-length 256 \
    --global-batch-size 32 \
    --micro-batch-size 2 \
    --tensor-model-parallel-size "$TENSOR_PARALLEL_SIZE" \
    --pipeline-model-parallel-size 8
# Optional flags, disabled for now (re-append to the command above to enable):
#    --virtual-pipeline-model-parallel-size 1
#    --micro-batch 8

# NOTE: when --auto-parallel is set, the search picks [pp, tp, dp, mbs]
# itself, so --micro-batch-size may have no effect here.