#!/bin/bash

# Launches pretraining for a LLaMA-style model; the architecture is
# selected by MODEL_SIZE below (defaults to the 7B configuration).
# (The old header said "345M", a leftover from the Megatron GPT example.)

# Fail fast on command errors, unset variables, and pipeline failures.
set -euo pipefail

# Required by Megatron-LM when sequence parallelism is enabled —
# presumably to serialize CUDA kernel launches so communication/compute
# overlap works; confirm against the Megatron-LM documentation.
export CUDA_DEVICE_MAX_CONNECTIONS=1

# Preprocessed dataset prefix and SentencePiece tokenizer model.
DATA_PATH=/workspace/data/llama_text_document
TOKENIZER_PATH=/workspace/tokenizers/Llama2Tokenizer/tokenizer.model

# Distributed training variables
NNODES=1
GPUS_PER_NODE=8
GPU_NUM=$(( GPUS_PER_NODE * NNODES ))
WORLD_SIZE=${GPU_NUM}   # same quantity as GPU_NUM; kept for compatibility

# Parallelism variables
TP=8   # tensor model parallel size
PP=1   # pipeline model parallel size
CP=1   # context parallel size

# Data-parallel degree is whatever remains after TP/PP/CP; guard the
# integer divisions so a bad combination fails loudly instead of
# silently truncating.
if (( GPU_NUM % (TP * PP * CP) != 0 )); then
  echo "GPU_NUM=${GPU_NUM} is not divisible by TP*PP*CP=$(( TP * PP * CP ))" >&2
  exit 1
fi
DP=$(( GPU_NUM / TP / PP / CP ))

GLOBAL_SIZE=8   # global batch size (samples per optimizer step)
if (( GLOBAL_SIZE % DP != 0 )); then
  echo "GLOBAL_SIZE=${GLOBAL_SIZE} is not divisible by DP=${DP}" >&2
  exit 1
fi
# Micro batch size per data-parallel rank.
MICRO_BATCH_SIZE=$(( GLOBAL_SIZE / DP ))
# Backward-compatible alias: GPT_ARGS below still reads the misspelled
# ${MICOR_SIZE}; keep both names in sync.
MICOR_SIZE=${MICRO_BATCH_SIZE}

# Network size variables
MODEL_SIZE=7

# Map MODEL_SIZE to the LLaMA architecture hyperparameters.
case "${MODEL_SIZE}" in
  7)    HIDDEN_SIZE=4096; NUM_HEAD=32; NUM_QUERY_GROUP=32; NUM_LAYERS=32; FFN_HIDDEN_SIZE=11008; NORM_EPS=1e-5 ;;
  13)   HIDDEN_SIZE=5120; NUM_HEAD=40; NUM_QUERY_GROUP=40; NUM_LAYERS=40; FFN_HIDDEN_SIZE=13824; NORM_EPS=1e-5 ;;
  70)   HIDDEN_SIZE=8192; NUM_HEAD=64; NUM_QUERY_GROUP=8;  NUM_LAYERS=80; FFN_HIDDEN_SIZE=28672; NORM_EPS=1e-5 ;;
  tiny) HIDDEN_SIZE=128;  NUM_HEAD=4;  NUM_QUERY_GROUP=4;  NUM_LAYERS=4;  FFN_HIDDEN_SIZE=512;   NORM_EPS=1e-5 ;;
  *)    echo "invalid MODEL_SIZE: ${MODEL_SIZE}"; exit 1 ;;
esac

DROP_OUT=0.0
MAX_SEQ_LEN=4096
MAX_POSITION_EMBEDDINGS=4096


# Parallelism / distributed-runtime flags.
# NOTE: some lines in the original were missing the line-continuation
# backslash; that was harmless only because this variable is expanded
# unquoted (word-split on the embedded newlines). The backslashes are
# restored here so every argument line follows the same convention.
DISTRIBUTED_ARGS="
    --tensor-model-parallel-size ${TP} \
    --pipeline-model-parallel-size ${PP} \
    --context-parallel-size ${CP} \
    --sequence-parallel \
    --distributed-backend nccl \
    --use-distributed-optimizer
"

# Model architecture, batch-size, and optimizer flags for pretrain_llama.py.
# NOTE(review): --micro-batch-size reads ${MICOR_SIZE}, a misspelling of
# "micro size" defined earlier in this script; rename both sides together
# or not at all, otherwise an empty value is passed silently.
# NOTE(review): --train-iters 20 with --lr-decay-iters 320000 means this
# run barely moves along the cosine schedule — presumably a smoke-test
# configuration; confirm before launching a real training run.
GPT_ARGS="
    --num-layers ${NUM_LAYERS} \
    --hidden-size ${HIDDEN_SIZE} \
    --num-attention-heads ${NUM_HEAD} \
    --group-query-attention \
    --num-query-groups ${NUM_QUERY_GROUP} \
    --ffn-hidden-size ${FFN_HIDDEN_SIZE} \
    --position-embedding-type rope \
    --make-vocab-size-divisible-by 1 \
    --norm-epsilon ${NORM_EPS} \
    --normalization RMSNorm \
    --swiglu \
    --untie-embeddings-and-output-weights \
    --max-position-embeddings ${MAX_POSITION_EMBEDDINGS} \
    --micro-batch-size ${MICOR_SIZE} \
    --global-batch-size ${GLOBAL_SIZE} \
    --lr 0.00015 \
    --train-iters 20 \
    --lr-decay-iters 320000 \
    --lr-decay-style cosine \
    --min-lr 1.0e-5 \
    --weight-decay 1e-2 \
    --lr-warmup-fraction .01 \
    --clip-grad 1.0 \
    --fp16 \
    --attention-softmax-in-fp32
"

# Dataset, tokenizer, and train/valid/test split flags.
# --split 90,5,5 partitions the single data path into train/valid/test.
DATA_ARGS="
    --data-path $DATA_PATH \
    --tokenizer-type Llama2Tokenizer \
    --seq-length ${MAX_SEQ_LEN} \
    --tokenizer-model ${TOKENIZER_PATH} \
    --split 90,5,5
"

# Logging / evaluation flags (evaluation disabled via --eval-iters 0).
OUTPUT_ARGS="
    --log-interval 1 \
    --eval-iters 0
"

# Single-node launch: torchrun --standalone spawns one worker process per
# GPU on this host. The *_ARGS variables are intentionally expanded
# unquoted so each flag becomes a separate argument.
# (The dangling trailing backslash after the last argument was removed;
# it continued the command onto the following line, so any line appended
# after it would silently have become extra arguments.)
torchrun --standalone --nproc_per_node="${GPUS_PER_NODE}" pretrain_llama.py \
    $GPT_ARGS \
    $DATA_ARGS \
    $OUTPUT_ARGS \
    $DISTRIBUTED_ARGS