#!/bin/bash

# Copyright 2019 Mobvoi Inc. All Rights Reserved.
. ./path.sh || exit 1;

# Automatically detect number of gpus
# Detect available GPUs and build a comma-separated device list
# (e.g. "0,1,2,3").  Falls back to -1 when nvidia-smi is not installed,
# i.e. no usable CUDA device.
if command -v nvidia-smi > /dev/null 2>&1; then
  num_gpus=$(nvidia-smi -L | wc -l)
  gpu_list=$(seq -s, 0 $((num_gpus - 1)))
else
  num_gpus=-1
  gpu_list="-1"
fi
# You can also manually specify CUDA_VISIBLE_DEVICES
# if you don't want to utilize all available GPU resources.
export CUDA_VISIBLE_DEVICES="${gpu_list}"
echo "CUDA_VISIBLE_DEVICES is ${CUDA_VISIBLE_DEVICES}"

stage=4 # start from 0 if you need to start from data preparation
stop_stage=4

# You should change the following two parameters for multiple machine training,
# see https://pytorch.org/docs/stable/elastic/run.html
export NCCL_DEBUG=ERROR
# NOTE(review): the NCCL_*/UCX_* settings below are tuned for a specific
# bonded-RDMA cluster (interface bond1, mlx5 HCAs) — verify interface and
# HCA names with `ifconfig` / `ibstat` before reusing on another cluster.
export NCCL_IB_GID_INDEX=3
export NCCL_IB_SL=3
export NCCL_CHECKS_DISABLE=1
export NCCL_P2P_DISABLE=0
export NCCL_IB_DISABLE=0
export NCCL_LL_THRESHOLD=16384
export NCCL_IB_CUDA_SUPPORT=1
export NCCL_SOCKET_IFNAME=bond1
export UCX_NET_DEVICES=bond1
export NCCL_IB_HCA=mlx5_bond_1,mlx5_bond_5,mlx5_bond_3,mlx5_bond_7,mlx5_bond_4,mlx5_bond_8,mlx5_bond_2,mlx5_bond_6
export NCCL_COLLNET_ENABLE=0
export SHARP_COLL_ENABLE_SAT=0
export NCCL_NET_GDR_LEVEL=2
export NCCL_IB_QPS_PER_CONNECTION=4
export NCCL_IB_TC=160
export NCCL_PXN_DISABLE=1
# NOTE(review): this hard-coded list overrides the auto-detected
# CUDA_VISIBLE_DEVICES set at the top of the script — presumably intentional
# for 8-GPU nodes, but confirm it matches the actual machine.
export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
export NCCL_TIMEOUT=6000000
#HOST_NODE_ADDR="28.45.33.95:29406"
num_nodes=8   # total number of machines in this training job
job_id=2023   # user-defined id shared by all nodes of the job

nj=16                       # parallel jobs for data-prep tooling
dict=dict/tokens_10.15.txt  # token dictionary (units file)

master_host="28.48.51.79"   # rank-0 master address for torchrun
host_port="29417"           # master port for torchrun
rank_id=0                   # this node's rank, 0..num_nodes-1

# Optional train_config
# 1. conf/train_transformer.yaml: Standard transformer
# 2. conf/train_conformer.yaml: Standard conformer
# 3. conf/train_unified_conformer.yaml: Unified dynamic chunk causal conformer
# 4. conf/train_unified_transformer.yaml: Unified dynamic chunk transformer
# 5. conf/train_u2++_conformer.yaml: U2++ conformer
# 6. conf/train_u2++_transformer.yaml: U2++ transformer
# 7. conf/train_u2++_lite_conformer.yaml: U2++ lite conformer, must load a well
#    trained model, and freeze encoder module, otherwise there will be a
#    autograd error
train_config=conf/train_u2++_moe_conformer_tokenizer.yaml
# Experiment directory: checkpoints, the rewritten train.yaml and decoding
# results all land here.  Earlier experiment dirs are kept below for reference.
#dir=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/asr_causal_d8
#dir=/apdcephfs_qy3/share_976139/users/xuelonggeng/ckpt/asr_causal_d8
# 120W_d8_causal_format_asr_fix_text_20Wjingpin
# dir=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/120W_d8_causal_format_asr_fix_text_20Wjingpin_stage3_true_hq
dir=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_8Wzh3WenHq_100Wcoarse_11Whq_20251114
mkdir -p "$dir"
tensorboard_dir=tensorboard
# Warm-start checkpoint passed to train.py via --checkpoint.  Leave unset to
# train from scratch.  Earlier checkpoints are kept below for reference.
#checkpoint=/apdcephfs_qy3/share_976139/users/xuelonggeng/ckpt/moe_ssl_new_model_with_bert_test4_epoch1_on_h20_init_from_epoch0/step_7199_wenet.pt
# checkpoint=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_epoch1_step7199_10W_multi_node/step_40499.pt
# checkpoint=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_stage1_step_40499_multi_node33_120W/step_46499.pt
#checkpoint=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_stage1_step_40499_multi_node33_120W_stage2/step_17999.pt
# checkpoint=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_stage1_step_40499_multi_node33_120W_stage3_causal/step_28499.pt
# checkpoint=/apdcephfs_qy3/share_976139/users/xuelonggeng/ckpt/asr_causal_d8/step_16499.pt
# checkpoint=$dir/step_8999.pt
# checkpoint=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/120W_d8_causal_format_asr_fix_text_20Wjingpin/step_20999.pt
# checkpoint=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/120W_d8_causal_format_asr_fix_text_20Wjingpin/step_3999.pt
# checkpoint=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/120W_d8_causal_format_asr_fix_text_20Wjingpin_stage2/step_25999.pt
# checkpoint=$dir/step_13999.pt
#checkpoint=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/120W_d8_causal_format_asr_fix_text_20Wjingpin_stage3_true_hq/step_61999.pt
checkpoint=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/120W_d8_causal_format_asr_fix_text_20Wjingpin_stage4_true_hq_with_en/step_63999.pt
num_workers=4  # dataloader workers per process
prefetch=10    # dataloader prefetch factor


# data_type can be `raw` or `shard`. Typically, raw is used for small dataset,
# `shard` is used for large dataset which is over 1k hours, and `shard` is
# faster on reading data and training.  This recipe uses `parquet`.
#train_data_path=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/100W_asr_data/zh_en.100w_1000
#train_data_path=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/20W_asr_data_hq/data.list.train_1000
# train_data_path=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/120W4ssl/120W.list
# train_data_path=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/20W_asr_data_hq/data.list.train_multi100
#train_data_path=/mnt/apdcephfs_sgfd/share_304127040/Tealab/user/xuelonggeng/data/8W_asr_data_hq/parquet.train_multi100
train_data_path=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/111W_asr_coarse100W_hq11W/parquet.list
# train_data_path=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/8W_asr_data_hq/data_tmp.jsonl
# train_data_path=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/8W_asr_data_hq/parquet/tar_file.list
# cv_data_path=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/20W_asr_data_hq/data.list.dev_3
cv_data_path=/mnt/apdcephfs_sgfd/share_304127040/Tealab/user/xuelonggeng/data/8W_asr_data_hq/parquet.dev
# cv_data_path=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/8W_asr_data_hq/data_dev.list
# cv_data_path=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/data/8W_asr_data_hq/parquet/tar_file_dev.list
mkdir -p "$dir/data"
# head -n 1 $train_data_path > $cv_data_path
data_type=parquet  # one of: raw | shard | parquet

# use average_checkpoint will get better result
average_checkpoint=true
decode_checkpoint=$dir/final.pt
average_num=30
decode_modes="ctc_greedy_search ctc_prefix_beam_search attention attention_rescoring"

train_engine=deepspeed  # "deepspeed" or "torch_ddp"

deepspeed_config=conf/ds_stage1.json
deepspeed_save_states="model_only"

# Parse "--name value" command-line overrides for any of the variables
# defined above (standard Kaldi/wenet option parser).
. tools/parse_options.sh || exit 1;
echo "num_nodes: $num_nodes"
echo "rank_id: $rank_id"


if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
  echo "开始训练"
  # proc_per_node is derived from CUDA_VISIBLE_DEVICES: count the
  # comma-separated device ids.
  num_gpus=$(echo "$CUDA_VISIBLE_DEVICES" | awk -F "," '{print NF}')
  # Use "nccl" if it works, otherwise use "gloo"
  # NOTE(xcsong): deepspeed fails with gloo, see
  #   https://github.com/microsoft/DeepSpeed/issues/2818
  dist_backend="nccl"

  # train.py rewrite $train_config to $dir/train.yaml with model input
  # and output dimension, and $dir/train.yaml will be used for inference
  # and export.
  # Fix: use POSIX '=' with a quoted operand; the original unquoted
  # '[ ${train_engine} == ... ]' is a bashism that errors when the
  # variable is empty.
  if [ "${train_engine}" = "deepspeed" ]; then
    echo "$0: using deepspeed"
  else
    echo "$0: using torch ddp"
  fi

  # NOTE(xcsong): Both ddp & deepspeed can be launched by torchrun
  # NOTE(xcsong): To unify single-node & multi-node training, we add
  #               all related args. You should change `nnodes` &
  #               `rdzv_endpoint` for multi-node, see
  #               https://pytorch.org/docs/stable/elastic/run.html#usage
  #               https://github.com/wenet-e2e/wenet/pull/2055#issuecomment-1766055406
  #               `rdzv_id` - A user-defined id that uniquely identifies the worker group for a job.
  #                           This id is used by each node to join as a member of a particular worker group.
  #               `rdzv_endpoint` - The rendezvous backend endpoint; usually in form <host>:<port>.
  # NOTE(xcsong): In multi-node training, some clusters require special NCCL variables to set prior to training.
  #               For example: `NCCL_IB_DISABLE=1` + `NCCL_SOCKET_IFNAME=enp` + `NCCL_DEBUG=INFO`
  #               without NCCL_IB_DISABLE=1
  #                   RuntimeError: NCCL error in: ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:1269, internal error, NCCL Version xxx
  #               without NCCL_SOCKET_IFNAME=enp  (IFNAME could be get by `ifconfig`)
  #                   RuntimeError: The server socket has failed to listen on any local network address. The server socket has failed to bind to [::]:xxx
  #               ref: https://github.com/google/jax/issues/13559#issuecomment-1343573764
  echo "$0: num_nodes is $num_nodes, proc_per_node is $num_gpus"
#  torchrun --nnodes=$num_nodes --nproc_per_node=$num_gpus \
#           --rdzv_id=$job_id --rdzv_backend="c10d" --rdzv_endpoint=$HOST_NODE_ADDR \
  torchrun --nnodes="$num_nodes" --nproc_per_node="$num_gpus" --node_rank="$rank_id" \
      --master_addr="$master_host" --master_port="$host_port" \
    wenet/bin/train.py \
      --train_engine "${train_engine}" \
      --config "$train_config" \
      --data_type "$data_type" \
      --train_data "$train_data_path" \
      --cv_data "$cv_data_path" \
      ${checkpoint:+--checkpoint "$checkpoint"} \
      --model_dir "$dir" \
      --tensorboard_dir "${tensorboard_dir}" \
      --ddp.dist_backend "$dist_backend" \
      --num_workers "${num_workers}" \
      --prefetch "${prefetch}" \
      --pin_memory \
      --timeout 5000 \
      --deepspeed_config "${deepspeed_config}" \
      --deepspeed.save_states "${deepspeed_save_states}"
fi

if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
  # Test model, please specify the model you want to test by --checkpoint
  # Fix: use POSIX '=' with a quoted operand; '[ ${var} == true ]' is an
  # unquoted bashism that errors when the variable is empty.
  if [ "${average_checkpoint}" = "true" ]; then
    decode_checkpoint=$dir/avg_${average_num}.pt
    echo "do model average and final checkpoint is $decode_checkpoint"
    python wenet/bin/average_model.py \
      --dst_model "$decode_checkpoint" \
      --src_path "$dir" \
      --num "${average_num}" \
      --val_best
  fi
  # Please specify decoding_chunk_size for unified streaming and
  # non-streaming model. The default value is -1, which is full chunk
  # for non-streaming inference.
  decoding_chunk_size=
  ctc_weight=0.3
  reverse_weight=0.5
  # $decode_modes stays unquoted on purpose: it holds several
  # space-separated mode names passed as separate arguments.
  python wenet/bin/recognize.py --gpu 0 \
    --modes $decode_modes \
    --config "$dir/train.yaml" \
    --data_type "$data_type" \
    --test_data data/test/data.list \
    --checkpoint "$decode_checkpoint" \
    --beam_size 10 \
    --batch_size 32 \
    --blank_penalty 0.0 \
    --ctc_weight "$ctc_weight" \
    --reverse_weight "$reverse_weight" \
    --result_dir "$dir" \
    ${decoding_chunk_size:+--decoding_chunk_size $decoding_chunk_size}
  for mode in ${decode_modes}; do
    python tools/compute-wer.py --char=1 --v=1 \
      data/test/text "$dir/$mode/text" > "$dir/$mode/wer"
  done
fi


if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
  # Export the best model you want (TorchScript; also emits a quantized copy).
  # Quoted expansions (SC2086) so paths with unusual characters still work.
  python wenet/bin/export_jit.py \
    --config "$dir/train.yaml" \
    --checkpoint "$dir/avg_${average_num}.pt" \
    --output_file "$dir/final.zip" \
    --output_quant_file "$dir/final_quant.zip"
fi

# Optionally, you can add LM and test it with runtime.
if [ ${stage} -le 7 ] && [ ${stop_stage} -ge 7 ]; then
  # 7.1 Prepare dict
  unit_file=$dict
  mkdir -p data/local/dict
  cp "$unit_file" data/local/dict/units.txt
  # NOTE(review): $data is never defined in this script, so it expands to
  # an empty string and these aishell paths resolve from "/". Set it (e.g.
  # via a --data override) before running this stage.
  tools/fst/prepare_dict.py "$unit_file" "${data}/resource_aishell/lexicon.txt" \
    data/local/dict/lexicon.txt
  # 7.2 Train lm
  lm=data/local/lm
  mkdir -p "$lm"
  tools/filter_scp.pl data/train/text \
    "$data/data_aishell/transcript/aishell_transcript_v0.8.txt" > "$lm/text"
  local/aishell_train_lms.sh
  # 7.3 Build decoding TLG
  tools/fst/compile_lexicon_token_fst.sh \
    data/local/dict data/local/tmp data/local/lang
  tools/fst/make_tlg.sh data/local/lm data/local/lang data/lang_test || exit 1;
  # 7.4 Decoding with runtime
  chunk_size=-1
  ./tools/decode.sh --nj 16 \
    --beam 15.0 --lattice_beam 7.5 --max_active 7000 \
    --blank_skip_thresh 0.98 --ctc_weight 0.5 --rescoring_weight 1.0 \
    --chunk_size "$chunk_size" \
    --fst_path data/lang_test/TLG.fst \
    --dict_path data/lang_test/words.txt \
    data/test/wav.scp data/test/text "$dir/final.zip" \
    data/lang_test/units.txt "$dir/lm_with_runtime"
  # Please see $dir/lm_with_runtime for wer
fi

# Optionally, you can decode with k2 hlg
if [ ${stage} -le 8 ] && [ ${stop_stage} -ge 8 ]; then
  if [ ! -f data/local/lm/lm.arpa ]; then
    # Fix: the original `echo ... || exit 1` never exits because echo
    # succeeds; abort explicitly when the LM from Stage 7 is missing.
    echo "Please run prepare dict and train lm in Stage 7"
    exit 1
  fi

  # 8.1 Build decoding HLG (rebuild if any required artifact is missing)
  required="data/local/hlg/HLG.pt data/local/hlg/words.txt"
  for f in $required; do
    if [ ! -f "$f" ]; then
      tools/k2/make_hlg.sh data/local/dict/ data/local/lm/ data/local/hlg
      break
    fi
  done

  # 8.2 Decode using HLG
  decoding_chunk_size=
  lm_scale=0.7
  decoder_scale=0.1
  r_decoder_scale=0.7
  decode_modes="hlg_onebest hlg_rescore"
  # $decode_modes stays unquoted on purpose: multiple space-separated modes.
  python wenet/bin/recognize.py --gpu 0 \
    --modes $decode_modes \
    --config "$dir/train.yaml" \
    --data_type "$data_type" \
    --test_data data/test/data.list \
    --checkpoint "$decode_checkpoint" \
    --beam_size 10 \
    --batch_size 16 \
    --blank_penalty 0.0 \
    --dict "$dict" \
    --word data/local/hlg/words.txt \
    --hlg data/local/hlg/HLG.pt \
    --lm_scale "$lm_scale" \
    --decoder_scale "$decoder_scale" \
    --r_decoder_scale "$r_decoder_scale" \
    --result_dir "$dir" \
    ${decoding_chunk_size:+--decoding_chunk_size $decoding_chunk_size}
  for mode in ${decode_modes}; do
    python tools/compute-wer.py --char=1 --v=1 \
      data/test/text "$dir/$mode/text" > "$dir/$mode/wer"
  done
fi

# Optionally, you can train with LF-MMI using k2
# Based on 20210601_u2++_conformer_exp/final.pt, we train 50 epocs with 1e-5 lr
# and average 10 best models, achieve 4.11 cer with hlg decoding
# Actually, you can achieve even lower cer by tuning lm_scale/decoder_scale/r_decoder_scale
if [ ${stage} -le 9 ] && [ ${stop_stage} -ge 9 ]; then
  # 9.1 Build token level bigram fst for LF-MMI training
  # (inputs: train/dev data dirs; output: data/local/lfmmi)
  tools/k2/prepare_mmi.sh data/train/ data/dev data/local/lfmmi

  # 9.2 Run LF-MMI training from stage 4, modify below args in train.yaml
  # model: k2_model
  # model_conf:
  #   lfmmi_dir data/local/lfmmi

  # 9.3 Run HLG decode from stage 8.2
fi
