#!/bin/bash

###==========================================================================
### Usage: bash run_distribute_train_ascend.sh [OPTIONS]...
### Description:
###     Run distributed train for mm model.

### Example:
### 1. Train a model with a config file (all six arguments are required, in the order shown in the usage message):
### bash run_distribute_train.sh configs/ttvitbaseall32guide.yaml /home/liupan/WingsofPanda/hccl_8p_01234567_127.0.1.1.json mm vibaseflashatten 8 0
###==========================================================================

# Require exactly six positional arguments; otherwise print usage to stderr
# and abort. (-ne is the numeric comparison; != would compare as strings.)
if [ "$#" -ne 6 ]
then
  echo "Usage: bash run_distribute_train.sh [CONFIG_PATH] [RANK_TABLE_FILE] [EXP_TYPE] [EXP_NAME] [RANK_SIZE] [RANK_START]" >&2
  exit 1
fi

# Resolve $1 to an absolute path and print it on stdout.
# Paths already starting with '/' are returned unchanged; relative paths are
# resolved against $PWD. `realpath -m` tolerates components that do not exist
# yet (e.g. the output directory before it is created). Quoting "$PWD/$1"
# keeps paths containing spaces or glob characters intact.
get_real_path(){
  if [ "${1:0:1}" == "/" ]; then
    echo "$1"
  else
    realpath -m "$PWD/$1"
  fi
}

# Fixed deployment mode; EXP_TYPE / EXP_NAME are read by train.py via the
# environment as well as passed on the command line below.
export DEPLOY_MODE=0
export EXP_TYPE=$3
export EXP_NAME=$4
RANK_SIZE=$5
RANK_START=$6

# Normalize the user-supplied paths to absolute ones. The arguments are
# quoted so paths containing spaces survive word splitting.
CONFIG_PATH=$(get_real_path "$1")
RANK_TABLE_FILE=$(get_real_path "$2")

echo "CONFIG PATH: $CONFIG_PATH"
echo "EXP_TYPE: $EXP_TYPE"
echo "EXP_NAME: $EXP_NAME"
echo "RANK TABLE FILE: $RANK_TABLE_FILE"
echo "START DEVICE ID: $RANK_START"

# The HCCL rank table file is mandatory for distributed Ascend training.
if [ ! -f "$RANK_TABLE_FILE" ]
then
    echo "error: RANK_TABLE_FILE=$RANK_TABLE_FILE is not a file" >&2
    exit 1
fi


export DEVICE_NUM=8
export RANK_SIZE=$RANK_SIZE
export CONFIG_PATH=$CONFIG_PATH
export RANK_TABLE_FILE=$RANK_TABLE_FILE
export MINDSPORE_HCCL_CONFIG_PATH=$RANK_TABLE_FILE
export HCCL_EXEC_TIMEOUT=6000
export HCCL_CONNECT_TIMEOUT=1800
ulimit -n 102400

cpus=`cat /proc/cpuinfo| grep "processor"| wc -l`
avg=`expr $cpus \/ $RANK_SIZE`
gap=`expr $avg \- 1`
LOG_PATH=../output
LOG_PATH=$(get_real_path $LOG_PATH)
rm -rf $LOG_PATH
mkdir $LOG_PATH

# Launch one training process per device. Each rank gets a private working
# directory (train_parallel$i) with its own copy of the code tree so the
# concurrent processes do not clobber each other's artifacts.
for ((i = 0; i < DEVICE_NUM; i++))
do
    # CPU range nominally assigned to this rank.
    # NOTE(review): cmdopt is computed but never used (e.g. not passed to
    # taskset) — confirm whether CPU pinning was intended here.
    start=$((i * avg))
    end=$((start + gap))
    cmdopt="$start-$end"

    export DEVICE_ID=$i
    export RANK_ID=$((RANK_START + i))

    rm -rf "./train_parallel$i"
    mkdir "./train_parallel$i"
    cp ../*.py "./train_parallel$i"
    pwd
    cp -rf ../models ../misc ../misc_utils ../med_lm_finetune ../configs ../data_utils ../examples ../models_old ../models_torch "./train_parallel$i"
    cd "./train_parallel$i" || exit
    # NOTE(review): SERVER_ID is never set in this script — presumably it is
    # exported by the caller; otherwise this prints an empty value.
    echo "start training for SERVER_ID $SERVER_ID rank $RANK_ID, device $DEVICE_ID"
    env > env.log
    # Run in the background so all ranks start concurrently; per-rank output
    # is captured in train_parallel$i/log.txt.
    python3 train.py --config "$CONFIG_PATH" --exp_type "$EXP_TYPE" --exp_name "$EXP_NAME" --output_dir "$LOG_PATH" > log.txt 2>&1 &
    cd ..
done

