#!/usr/bin/env bash
###
 # @Author: gaoxinglong
 # @Date: 2022-11-12 12:11:57
 # @LastEditTime: 2022-11-14 10:15:49
 # @LastEditors: gaoxinglong
###

# Banner: Chinese keyword-spotting (KWS) training with the wekws toolkit.
echo "============================================================================"
echo "                      CHINESE KWS TRAIN WITH WEKWS                       "
echo "============================================================================"

# Stop any leftover training processes from a previous run.
# NOTE(review): the old `ps aux | grep train.py | awk | xargs kill -9` pipeline
# also matched the transient `grep` process and failed when no process was
# found (xargs ran `kill -9` with no PID). pkill matches the full command line
# directly; `|| true` keeps "nothing to kill" from looking like an error.
# SIGKILL (-9) is kept to preserve the original behavior, though trying
# SIGTERM first would allow a clean shutdown.
pkill -9 -f train.py || true
echo "============================================================================"
echo "                           kws Training Start                             "
echo "============================================================================"
# Comma-separated CUDA device ids to train on.
gpu="0,1,2,3,4,5,6,7"
# Number of GPUs = number of comma-separated fields (one awk call instead of
# the old echo | tr | wc pipeline).
n_gpus=$(echo "${gpu}" | awk -F',' '{print NF}')
# Root of the experiment environment; checkpoints/logs go under $exp_dir.
env_root=/home/gaoxinglong/env/tools/rnnt-asr
echo "env_root: " "$env_root"
echo "n_gpus" "${n_gpus}"
exp_dir=${env_root}/exp
mkdir -p "$exp_dir"

# Make the repo importable and pin the visible GPUs for all child processes.
# Only prepend the existing PYTHONPATH when it is non-empty, so an unset
# PYTHONPATH does not produce a leading ":" (an implicit empty/CWD entry).
export PYTHONPATH="${PYTHONPATH:+${PYTHONPATH}:}${PWD}"
export CUDA_VISIBLE_DEVICES=${gpu}
export NCCL_DEBUG=INFO
export NCCL_IB_GID_INDEX=3
export NCCL_IB_HCA=bond0.250
# NOTE(review): NCCL_IB_CUDA_SUPPORT is documented as a 0/1 flag, not an
# interface name — confirm whether "bond0.250" here is intentional.
export NCCL_IB_CUDA_SUPPORT=bond0.250
#export NCCL_SOCKET_IFNAME=en,eth,em,bond0.250
export NCCL_SOCKET_IFNAME=bond0
# One OpenMP thread per training process/GPU to avoid CPU oversubscription.
export OMP_NUM_THREADS=${n_gpus}


# Second-link / Perseus NCCL settings.
# NOTE(review): these were plain assignments before; without `export` they are
# invisible to the python training subprocesses and therefore had no effect —
# confirm that exporting them is the intended behavior.
export NCCL_IB_DISABLE=0
export NCCL_GDR_FLUSH_DISABLE=1
export NCCL_NET_GDR_LEVEL=2
export NCCL_DEBUG=INFO                        # enable debug info for the second link
export PERSEUS_NCCL_ENABLE=1                  # enable the second NCCL link
export PERSEUS_NCCL_SOCKET_IFNAME=bond0.250   # second link goes over the VPC interface
export PERSEUS_NCCL_IB_DISABLE=1              # disable RDMA on the second link
export PERSEUS_NCCL_DEBUG=INFO                # enable debug info for the second link
    
num_nodes=1    # total number of machines in the job
node=0
node_rank=0    # rank of this machine among num_nodes
echo "=====================STAGE 1: TRAIN MODEL USING DDP ====================="
# Training
# Shared file used for torch.distributed file:// rendezvous.
INIT_FILE=$exp_dir/ddp_init
# You had better rm it manually before you start run.sh on first node.
rm -f "$INIT_FILE" # delete old one before starting
init_method=file://$(readlink -f "$INIT_FILE")
echo "$0: init method is $init_method"
# Count visible devices (fields in the comma-separated list).
num_gpus=$(echo "$CUDA_VISIBLE_DEVICES" | awk -F "," '{print NF}')
echo "total num_gpus is: $num_gpus"
# Use "nccl" if it works, otherwise use "gloo"
dist_backend="nccl"
# The total number of processes/gpus, so that the master knows
# how many workers to wait for.
# More details about ddp can be found in
# https://pytorch.org/tutorials/intermediate/dist_tuto.html
world_size=$(( num_gpus * num_nodes ))
echo "total world_size is: $world_size"
cmvn_opts=
# $use_global_cmvn && cp ${format_data_dir}/train/global_cmvn $exp_dir
# $use_global_cmvn && cmvn_opts="--cmvn ${exp_dir}/global_cmvn"
# train.py will write $train_config to $dir/train.yaml with model input
# and output dimension, train.yaml will be used for inference or model
# export later
# Resolve the interpreter once; `command -v` is the portable form of `which`.
python_cmd=$(readlink -f "$(command -v python3)")
# python_cmd=python
# python_cmd=/home/gaoxinglong/env/bin/anaconda3/bin/python
train_cmd=${env_root}/wekws/bin/train.py
# Launch one training process per visible GPU in the background, then wait
# for all of them to finish.
for ((i = 0; i < num_gpus; ++i)); do
{
    # i-th entry of CUDA_VISIBLE_DEVICES (`cut` fields are 1-based);
    # $(( )) replaces the deprecated $[ ] arithmetic form.
    gpu_id=$(echo "$CUDA_VISIBLE_DEVICES" | cut -d',' -f$(( i + 1 )))
    # Rank of each gpu/process used for knowing whether it is
    # the master or a worker.
    echo 'node_rank :' $node_rank  'num_gpus : ' $num_gpus $i 'gpu_id: ' $gpu_id
    global_rank=$(( node_rank * num_gpus + i ))
    echo "global_rank: " $global_rank
    # NOTE(review): these options previously lacked their leading "--"
    # (e.g. `init_method $init_method`), which argparse would reject as stray
    # positional arguments. Flag names below are inferred from the variable
    # names — confirm against train.py's argument parser.
    # Per-rank log files: previously every concurrent `tee` truncated the
    # same ${exp_dir}/train.log, clobbering the other ranks' output.
    $python_cmd $train_cmd --gpu $gpu_id \
        --init_method $init_method \
        --world_size $world_size \
        --local_rank $global_rank 2>&1 | tee "${exp_dir}/train.rank${global_rank}.log"
} &
done
wait

# NOTE(review): previous message said "ASR", but this script trains a KWS
# model (see the banner at the top) — corrected for consistency.
echo "Finish KWS model training"
