#!/bin/bash
# Launch script for Aquila-7B pretraining with muP, via torchrun.

# Required by Megatron-LM for correct communication/computation overlap.
export CUDA_DEVICE_MAX_CONNECTIONS=1
export NCCL_SOCKET_IFNAME=eth0
export NCCL_IB_DISABLE=0
export NCCL_IB_CUDA_SUPPORT=1
export NCCL_IB_GID_INDEX=0
export NCCL_IB_HCA=mlx5_0,mlx5_3
# Fix: "debug" is not a valid NCCL_DEBUG level (valid: VERSION, WARN, INFO,
# TRACE) and was silently ignored; INFO is the standard diagnostic level.
export NCCL_DEBUG=INFO
# export OMP_NUM_THREADS=4

# Timestamped experiment name; $(...) preferred over backticks.
DATETIME=$(date +'date_%y-%m-%d_time_%H-%M-%S')
EXPNAME="aquila_7b_mup-$DATETIME"

# Datasets
source "$(dirname "$0")"/datasets.sh

# Change for project config.
# Fill in the two paths below. They are quoted so the script stays
# syntactically valid before editing (the original unquoted <...> placeholders
# were parsed as shell redirections), and the guard fails fast with a clear
# message instead of creating directories under an empty prefix.
PROJ_HOME="<Specify the project home path>"
DATA_PATH="<Specify the data path>"
case "$PROJ_HOME$DATA_PATH" in
  *"<Specify"*)
    echo "ERROR: edit $0 and set PROJ_HOME and DATA_PATH first" >&2
    exit 1
    ;;
esac

CHECKPOINT_PATH=$PROJ_HOME/checkpoints/$EXPNAME
mkdir -p "$CHECKPOINT_PATH"
TOKENIZER_PATH=$PROJ_HOME/tokenizer
VOCAB_FILE=examples/aquila/tokenizer/vocab.json
MERGE_FILE=examples/aquila/tokenizer/merges.txt
LOG_PATH=$PROJ_HOME/logs/$EXPNAME
mkdir -p "$LOG_PATH"
# Archive a copy of this launch script alongside the run's logs.
cp "$0" "$LOG_PATH"/
LOG_FILE=$LOG_PATH/log.txt
TB_PATH=$PROJ_HOME/tboard/$EXPNAME
mkdir -p "$TB_PATH"
MUP_PATH=$PROJ_HOME/mup/$EXPNAME
mkdir -p "$MUP_PATH"

GPUS_PER_NODE=8
# Change for multinode config
MASTER_ADDR=192.168.5.2
MASTER_PORT=12345
NNODES=1
NODE_RANK=0
# Total ranks across all nodes (informational; torchrun derives this itself).
WORLD_SIZE=$((GPUS_PER_NODE * NNODES))

# torchrun launcher flags. Worker stdout/stderr go to files under $LOG_PATH
# (--redirects 3) and are also mirrored to the console (--tee 3).
DISTRIBUTED_ARGS="
    --nproc_per_node $GPUS_PER_NODE \
    --nnodes $NNODES \
    --node_rank $NODE_RANK \
    --master_addr $MASTER_ADDR \
    --master_port $MASTER_PORT \
    --log_dir $LOG_PATH --redirects 3 --tee 3
"

# Training schedule and parallelism layout.
# Tensor- and pipeline-parallel size 1 → pure data parallelism across ranks;
# global-batch 8 with micro-batch 1 on 8 GPUs → no gradient accumulation.
TRAINING_ARGS="
    --train-iters 500000 \
    --eval-iters 10 \
    --eval-interval 20 \
    --tensor-model-parallel-size 1 \
    --pipeline-model-parallel-size 1 \
    --micro-batch-size 1 \
    --global-batch-size 8 \
    --disable-bias-linear \
    --use-flash-attn \
    --use-distributed-optimizer
"

# muP (Maximal Update Parametrization) settings with coordinate checking.
# Fix: save muP data into the per-experiment $MUP_PATH (created above and
# previously unused) instead of the shared $PROJ_HOME/mup directory, so runs
# do not overwrite each other's saved muP state.
MUP_ARGS="
    --mup apply \
    --mup-attn-multiplier 8 \
    --readout-zero-init \
    --query-zero-init \
    --mup-coord-check \
    --mup-save $MUP_PATH
    "

# FP16 training with dynamic loss scaling: start at 2^17, back off on
# overflow, never drop below 1.0; raise again after 1024 clean steps.
MIXED_PRECISION_ARGS="
    --fp16 \
    --initial-loss-scale 131072 \
    --min-loss-scale 1.0 \
    --loss-scale-window 1024
"

# Dataset, tokenizer files, and 998/1/1 train/valid/test split.
# Fix: added the missing space before the continuation backslash after
# "100008" — the original only worked because the next line happened to start
# with indentation, which would silently fuse tokens if reformatted.
DATA_ARGS="
    --data-path $DATA_PATH \
    --vocab-file $VOCAB_FILE \
    --vocab-size 100008 \
    --merge-file $MERGE_FILE \
    --data-impl mmap \
    --split 998,1,1
"

# Aquila-7B architecture: 32 layers, hidden 4096, 32 heads, seq length 2048,
# rotary position embeddings (learned position embeddings disabled), SwiGLU
# MLP (hidden dim rounded to a multiple of 256), RMSNorm, and untied
# input/output embedding matrices.
NETWORK_ARGS="
    --num-layers 32 \
    --hidden-size 4096 \
    --num-attention-heads 32 \
    --seq-length 2048 \
    --max-position-embeddings 2048 \
    --use-rotary-position-embeddings \
    --no-position-embedding \
    --swiglu \
    --multiple-of 256 \
    --apply-layernorm-rms \
    --untie-embeddings-and-output-weights
"

# Fixed seed for reproducible initialization and data ordering.
INITIALIZATION_ARGS="
    --seed 1234 
"

# No dropout (common for LLM pretraining); weight decay plus grad clipping.
REGULARIZATION_ARGS="
    --attention-dropout 0.0 \
    --hidden-dropout 0.0 \
    --weight-decay 1e-2 \
    --clip-grad 1.0
"

# Cosine decay from 1.5e-4 down to 1e-5 over 320k iters, 1% linear warmup.
LEARNING_RATE_ARGS="
    --lr 0.00015 \
    --lr-decay-style cosine \
    --lr-decay-iters 320000 \
    --min-lr 1.0e-5 \
    --lr-warmup-fraction .01
"

# Save every 1000 iters. On restart, weights are loaded but optimizer and RNG
# state are reinitialized (--no-load-optim / --no-load-rng).
CHECKPOINTING_ARGS="
    --no-load-optim \
    --no-load-rng \
    --save-interval 1000 \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH
"

# Console and TensorBoard logging cadence.
LOGGING_ARGS="
    --log-interval 1 \
    --timing-log-level 1 \
    --tensorboard-dir $TB_PATH \
    --tensorboard-log-interval 10 
"

# Launch pretraining. The $*_ARGS expansions are intentionally unquoted so the
# shell word-splits each group into individual flags.
# Fix: $LOG_FILE was defined above but never used — mirror the launcher's
# combined output into it with tee. pipefail keeps torchrun's exit status
# (otherwise the pipeline would report tee's status instead).
set -o pipefail
torchrun $DISTRIBUTED_ARGS pretrain_gpt.py \
    $TRAINING_ARGS \
    $MUP_ARGS \
    $MIXED_PRECISION_ARGS \
    $DATA_ARGS \
    $NETWORK_ARGS \
    $INITIALIZATION_ARGS \
    $REGULARIZATION_ARGS \
    $LEARNING_RATE_ARGS \
    $CHECKPOINTING_ARGS \
    $LOGGING_ARGS 2>&1 | tee "$LOG_FILE"