#!/bin/bash
# Launch a single-node, 16-GPU SFT fine-tuning run of Llama-2-13b via
# torch.distributed.run; all output is mirrored into ${LOG_DIR}.

# Error on unset variables. (Deliberately no 'set -e': the best-effort
# network probe below may legitimately find nothing.)
set -u

export ENABLE_FLASH_ATTENTION_WITH_IXDNN=0
export CUDA_VISIBLE_DEVICES='0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15'

time=$(date "+%Y%m%d-%H%M%S")
echo "time : $time"

NPROC_PER_NODE=16   # processes (GPUs) per node
NNODES=1            # total number of nodes
RANK=0              # 0 on the master host, 1 on the other machines

# Pick this host's IPv4 address on the 10.31.10.x subnet as the rendezvous
# address. 'grep -F' matches the dots literally (the previous unescaped
# pattern '10.31.10.' would also match e.g. '10x31y10z'); '|| true' keeps
# an empty result from aborting the launcher so we can warn explicitly.
MASTER_ADDR=$(ip -4 addr show | grep inet | grep -F '10.31.10.' | awk '{print $2}' | cut -d/ -f1 || true)
if [ -z "${MASTER_ADDR}" ]; then
    echo "WARNING: no 10.31.10.x address found; MASTER_ADDR is empty" >&2
fi
export NCCL_SOCKET_IFNAME=br0
MASTER_PORT=14788

# NOTE(review): DATE is currently unused anywhere below — kept for
# compatibility with external tooling that may source this script.
DATE=$(date +%y%m%d%H%M%S)
LOG_DIR=${LOG_DIR:-train_log}

mkdir -p "${LOG_DIR}"   # -p is a no-op when the directory already exists
# Launch the SFT job under torch.distributed.run, tracing the command with
# 'set -x' and teeing stdout+stderr to a per-rank log file. Every expansion
# is quoted (SC2086): an unquoted empty $MASTER_ADDR would silently vanish
# and torchrun would consume the next flag as its value.
{   set -x;
    python3 -m torch.distributed.run \
    --nproc_per_node "$NPROC_PER_NODE" \
    --nnodes "$NNODES" \
    --node_rank "$RANK" \
    --master_addr "$MASTER_ADDR" \
    --master_port "$MASTER_PORT" \
    ../src/train.py \
    --stage sft \
    --deepspeed ../examples/deepspeed/ds_z3_config.json \
    --model_name_or_path /data1/df/models/llama2-13b/Llama-2-13b-hf \
    --do_train \
    --dataset alpaca_gpt4_en \
    --dataset_dir ../data \
    --template llama2 \
    --finetuning_type full \
    --output_dir ./outputllm_1 \
    --overwrite_cache \
    --overwrite_output_dir \
    --cutoff_len 2048 \
    --preprocessing_num_workers 16 \
    --per_device_train_batch_size 16 \
    --per_device_eval_batch_size 20 \
    --gradient_accumulation_steps 1 \
    --lr_scheduler_type cosine \
    --logging_steps 1 \
    --warmup_steps 20 \
    --save_steps 40000 \
    --eval_steps 600 \
    --evaluation_strategy steps \
    --learning_rate 5e-5 \
    --num_train_epochs 3.0 \
    --max_samples 30000000 \
    --val_size 0.1 \
    --plot_loss \
    --flash_attn fa2 \
    --print_param_status \
    --bf16
} |& tee "${LOG_DIR}/output_${RANK}.log"
    
    # --flash_attn \
        # --flash_attn fa2\
    # -- disable_gradient_checkpointing

# sleep 500

# ps -elf | grep "ixsmi dmon" | awk '{print $4}' | xargs kill -s 9 > /dev/null 

echo "DONE!!"
