#!/bin/bash
# Launcher configuration for Wenet MLS2T-LLM training (stage 4 = train).
# All variables below can be overridden on the command line via
# tools/parse_options.sh, e.g. ./run.sh --stage 4 --stop_stage 4.

. ./path.sh || exit 1;

export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
stage=4 # start from 0 if you need to start from data_list preparation
stop_stage=4

train_config=conf/whisper_mms_phi3b_v9.yaml
# FIX(review): the first 'dir=' assignment was dead code — it was immediately
# overwritten by the second one. Kept commented out for reference so editing
# it by mistake no longer silently does nothing.
#dir=/home/work_nfs11/hfxue/checkpoint/wenet_MLS2T_LLM/Fleurs_whispermms_v1_largegrad/stage1
dir=/home/work_nfs11/hfxue/checkpoint/wenet_MLS2T_LLM/MLlibrispeech_whispermms_v8_lidinitweight_no/stage1

mkdir -p "$dir"

data_type=shard # raw or shard, shard_mix_task
train_data=1wenglish_other7/train/data_english1w_other7.list
cv_data=1wenglish_other7/dev/data.list

#train_data=fleurs/train/data.list
#cv_data=fleurs/dev/data.list
num_workers=6  # number of data-loading worker processes
prefetch=100
# Empty checkpoint means train from scratch; set to resume, e.g.:
# checkpoint=$dir/step_24000.pt
checkpoint=
train_engine="torch_ddp" # deepspeed or torch_ddp

HOST_NODE_ADDR="localhost:0"
num_nodes=1
job_id=2024

# NOTE: cmvn / do_delta settings were moved into the training config yaml.

deepspeed_config=conf/ds_stage2.json
deepspeed_save_states="model_only"

. tools/parse_options.sh || exit 1;

set -e
set -u
set -o pipefail

# Stage 4: distributed training with torchrun.
if [ "${stage}" -le 4 ] && [ "${stop_stage}" -ge 4 ]; then
  mkdir -p "$dir"
  # One GPU per comma-separated entry in CUDA_VISIBLE_DEVICES.
  num_gpus=$(echo "$CUDA_VISIBLE_DEVICES" | awk -F "," '{print NF}')
  dist_backend="nccl"

  # FIX: '==' inside single-bracket [ ] is a bashism and the expansion was
  # unquoted; use [[ ]] with a quoted variable.
  if [[ "${train_engine}" == "deepspeed" ]]; then
    echo "$0: using deepspeed"
  else
    echo "$0: using torch ddp"
  fi

  echo "$0: num_nodes is $num_nodes, proc_per_node is $num_gpus"
  # The --deepspeed_* flags are passed unconditionally; train.py is expected
  # to ignore them when train_engine=torch_ddp.
  torchrun --standalone --nnodes="$num_nodes" --nproc_per_node="$num_gpus" \
           --rdzv_id="$job_id" --rdzv_backend="c10d" --rdzv_endpoint="$HOST_NODE_ADDR" \
    wenet/bin/train.py \
      --train_engine "${train_engine}" \
      --config "$train_config" \
      --data_type "$data_type" \
      --train_data "$train_data" \
      --cv_data "$cv_data" \
      ${checkpoint:+--checkpoint "$checkpoint"} \
      --model_dir "$dir" \
      --tensorboard_dir "$dir/tensorboard" \
      --ddp.dist_backend "$dist_backend" \
      --num_workers "${num_workers}" \
      --prefetch "${prefetch}" \
      --pin_memory \
      --deepspeed_config "${deepspeed_config}" \
      --deepspeed.save_states "${deepspeed_save_states}" \
      --timeout 1200 \
      --use_amp
fi




