#!/bin/bash

# Copyright 2019 Mobvoi Inc. All Rights Reserved.

# En/Cn (code-switching) ASR recipe driver. Select which pipeline stages to
# run via --stage/--stop_stage (CLI parsing is done by tools/parse_options.sh).
. ./path.sh || exit 1;

# Use this to control how many gpus you use. It's 1-gpu training if you
# specify just 1 gpu, otherwise it's multi-gpu training based on DDP in
# pytorch.
export CUDA_VISIBLE_DEVICES="7"

stage=5 # start from 0 if you need to start from data preparation
stop_stage=5

# You should change the following two parameters for multiple machine training,
# see https://pytorch.org/docs/stable/elastic/run.html
HOST_NODE_ADDR="localhost:0"
num_nodes=1
job_id=2023

# The aishell dataset location, please change this to your own path
# make sure of using absolute path. DO-NOT-USE relative path!
data=/export/data/asr-data/OpenSLR/33/
data_url=www.openslr.org/resources/33

nj=16
# Token symbol table (alternative paraformer units kept for reference below).
dict=data/units_en_cn.txt
#dict=data/units_paraformer.txt
# Sentencepiece BPE model used to tokenize the English part.
bpe_model=data/en_cn_bpe.model

# data_type can be `raw` or `shard`. Typically, raw is used for small dataset,
# `shard` is used for large dataset which is over 1k hours, and `shard` is
# faster on reading data and training.
data_type=shard
num_utts_per_shard=1000

train_set=train

train_config=conf/train_whisper_medium.yaml
cmvn=false                # apply global CMVN when true (see stage 4)
dir=exp/en_cn             # experiment/output directory
tensorboard_dir=tensorboard
checkpoint=               # resume-from checkpoint (empty = train from scratch)
num_workers=8
prefetch=500

# use average_checkpoint will get better result
average_checkpoint=false
decode_checkpoint=$dir/9.pt
decode_checkpoint_name=9pt   # tag used in the stage-5 decode output dir name
average_num=30
decode_modes="ctc_greedy_search ctc_prefix_beam_search attention attention_rescoring"

# Training engine: "torch_ddp" or "deepspeed".
train_engine=torch_ddp

# Deepspeed settings; presumably ignored when train_engine=torch_ddp -- confirm.
deepspeed_config=conf/ds_stage2.json
deepspeed_save_states="model_only"

. tools/parse_options.sh || exit 1;





if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
  # Stage 4: model training via torchrun (DDP or deepspeed per $train_engine).
  mkdir -p $dir
  # One training process per GPU: count comma-separated ids in
  # CUDA_VISIBLE_DEVICES to get proc_per_node.
  num_gpus=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
  # Use "nccl" if it works, otherwise use "gloo"
  # NOTE(xcsong): deepspeed fails with gloo, see
  #   https://github.com/microsoft/DeepSpeed/issues/2818
  dist_backend="nccl"
  # When $cmvn is true, stage the global CMVN stats into $dir and pass them
  # to train.py via $cmvn_opts; otherwise $cmvn_opts stays empty.
  cmvn_opts=
  $cmvn && cp data/${train_set}/global_cmvn4paraformer $dir/global_cmvn
  $cmvn && cmvn_opts="--cmvn ${dir}/global_cmvn"

  # train.py rewrite $train_config to $dir/train.yaml with model input
  # and output dimension, and $dir/train.yaml will be used for inference
  # and export.
  if [ ${train_engine} == "deepspeed" ]; then
    echo "$0: using deepspeed"
  else
    echo "$0: using torch ddp"
  fi

  echo "$0: num_nodes is $num_nodes, proc_per_node is $num_gpus"
  # NOTE(review): the --deepspeed_* flags are passed even under torch_ddp;
  # presumably train.py ignores them in that case -- confirm.
  torchrun --nnodes=$num_nodes --nproc_per_node=$num_gpus \
           --rdzv_id=$job_id --rdzv_backend="c10d" --rdzv_endpoint=$HOST_NODE_ADDR \
    wenet/bin/train.py \
      --train_engine ${train_engine} \
      --config $train_config \
      --data_type  $data_type \
      --symbol_table  ${dict} \
      --bpe_model ${bpe_model} \
      --train_data data/asru_train.shards \
      --cv_data data/asru_dev.shards \
      ${checkpoint:+--checkpoint $checkpoint} \
      --model_dir $dir \
      --tensorboard_dir ${tensorboard_dir} \
      --ddp.dist_backend $dist_backend \
      --num_workers ${num_workers} \
      --prefetch ${prefetch} \
      $cmvn_opts \
      --pin_memory \
      --deepspeed_config ${deepspeed_config} \
      --deepspeed.save_states ${deepspeed_save_states}
fi

test_sets_all=("aishell1" "aishell2" "SPEECHIO_ASR_ZH00000" "SPEECHIO_ASR_ZH00001" "SPEECHIO_ASR_ZH00002" "SPEECHIO_ASR_ZH00003" "SPEECHIO_ASR_ZH00004" "SPEECHIO_ASR_ZH00005" "test_meeting" "test_net")
test_sets=( "SPEECHIO_ASR_ZH00000" "SPEECHIO_ASR_ZH00001" "SPEECHIO_ASR_ZH00002" "SPEECHIO_ASR_ZH00003" "SPEECHIO_ASR_ZH00004" "SPEECHIO_ASR_ZH00005" "test_meeting" "test_net" )

if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
  # Stage 5: decode every set in ${test_sets} with the trained model and
  # score WER/CER per decode mode.
  # Test model, please specify the model you want to test by --decode_checkpoint
  cmvn_opts=
#  $cmvn && cmvn_opts="--cmvn data/${train_set}/global_cmvn"
  # TODO, Add model average here
  # mkdir -p $dir/test
  # if [ ${average_checkpoint} == true ]; then
  #   decode_checkpoint=$dir/avg_${average_num}.pt
  #   echo "do model average and final checkpoint is $decode_checkpoint"
  #   python wenet/bin/average_model.py \
  #     --dst_model $decode_checkpoint \
  #     --src_path $dir  \
  #     --num ${average_num} \
  #     --val_best
  # fi
  # Specify decoding_chunk_size if it's a unified dynamic chunk trained model
  # -1 for full chunk
  decoding_chunk_size=
  ctc_weight=0.5
  for test_set in "${test_sets[@]}"; do
  {
    echo "test this dataset: $test_set"
    test_dir=$dir/test_${decode_checkpoint_name}/${test_set}
#    wer_path=$test_dir/wer
#    if [ -e "$wer_path" ]; then
#      echo "$wer_path already exists, skipping inference for this dataset"
#      continue
#    fi
    mkdir -p $test_dir
    export CUDA_VISIBLE_DEVICES="7"
    # NOTE(review): --gpu 7 together with CUDA_VISIBLE_DEVICES="7" assumes
    # recognize.py derives CUDA_VISIBLE_DEVICES from --gpu itself -- confirm.
    # FIX: use --modes (not the argparse prefix-abbreviation --mode), matching
    # the stage-8 invocation; $decode_modes is intentionally unquoted so each
    # mode is passed as its own argument. Also pass --decoding_chunk_size when
    # $decoding_chunk_size is set (it was previously defined but never used),
    # and drop the dangling backslash that continued the command into a
    # commented-out line.
    python wenet/bin/recognize.py --gpu 7 \
      --modes $decode_modes \
      --config $dir/train.yaml \
      --data_type raw \
      --dict $dict \
      --bpe_model ${bpe_model} \
      --test_data data/test/$test_set/data.list \
      --checkpoint $decode_checkpoint \
      --beam_size 10 \
      --batch_size 1 \
      --penalty 0.0 \
      --result_dir $test_dir \
      --ctc_weight $ctc_weight \
      ${decoding_chunk_size:+--decoding_chunk_size $decoding_chunk_size}

    # recognize.py writes one sub-directory per decode mode under $test_dir;
    # for each mode: decode BPE pieces back to plain text, then score.
    big_test_dir=$test_dir
    for mode in ${decode_modes}; do
      test_dir=$big_test_dir/$mode
      cp $test_dir/text $test_dir/text_bpe
      cut -f2- -d " " $test_dir/text_bpe > $test_dir/text_bpe_value_tmp
      cut -f1 -d " " $test_dir/text_bpe > $test_dir/text_bpe_key_tmp
      # TODO(review): hard-coded absolute sentencepiece model path; presumably
      # this should be ${bpe_model} -- confirm against the training config.
      tools/spm_decode --model=/home/work_nfs7/yhliang/wenet-main/examples/aishell/s0/data/bpe/unigram2000.model --input_format=piece \
        < $test_dir/text_bpe_value_tmp | sed -e "s/▁/ /g" > $test_dir/text_value_tmp
      paste -d " " $test_dir/text_bpe_key_tmp $test_dir/text_value_tmp > $test_dir/text
      python tools/compute-wer.py --char=1 --v=1 \
        data/test/$test_set/text $test_dir/text > $test_dir/wer
      tail -n 6 $test_dir/wer
    done

    echo "$test_set has been decoded!"
  }
  done
  # NOTE: this 'wait' is a no-op because the per-set group above is not
  # backgrounded; append '&' after '}' to decode the sets in parallel.
  wait

fi


if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
  # Stage 6: export the chosen checkpoint to TorchScript for runtime decoding.
  echo '开始导出模型'
  # Export the best model you want; this emits both a regular and a
  # quantized TorchScript archive under ${dir}.
  python wenet/bin/export_jit.py \
    --config "${dir}/train.yaml" \
    --checkpoint "${dir}/9.pt" \
    --output_file "${dir}/final.zip" \
    --output_quant_file "${dir}/final_quant.zip"
fi

if [ ${stage} -le 7 ] && [ ${stop_stage} -ge 7 ]; then
  # Stage 7: decode with the exported TorchScript model via the libtorch
  # runtime decoder_main binary.
  export GLOG_logtostderr=1
  export GLOG_v=2
  # Hard-coded NFS locations of the test wav list and the exported model dir.
  wav_scp=/home/work_nfs7/xlgeng/workspace/wenet-sanm/examples/aishell/en_cn/data/asru_test.scp
  jit_model_dir=/home/work_nfs7/xlgeng/workspace/wenet-sanm/examples/aishell/en_cn/exp/en_cn
  # -1 chunk size means full-utterance (non-streaming) decoding.
  /home/work_nfs7/xlgeng/workspace/wenet-sanm/runtime/libtorch/build/bin/decoder_main \
      --chunk_size -1 \
      --wav_scp "${wav_scp}" \
      --model_path "${jit_model_dir}/final.zip" \
      --unit_path /home/work_nfs7/xlgeng/workspace/wenet-sanm/examples/aishell/en_cn/data/units_en_cn.txt \
      --result /home/work_nfs7/xlgeng/workspace/wenet-sanm/examples/aishell/en_cn/data/jit_out_text

fi

# Optionally, you can add LM and test it with runtime.
if [ ${stage} -le 17 ] && [ ${stop_stage} -ge 17 ]; then
  # Stage 17: build an n-gram LM and TLG decoding graph, then decode with the
  # runtime decoder.  NOTE(review): numbered 17 so the usual 0..9 stage range
  # skips it -- presumably intentional; confirm before renumbering.
  # 7.1 Prepare dict
  unit_file=$dict
  mkdir -p data/local/dict
  cp $unit_file data/local/dict/units.txt
  # Build a lexicon from the aishell resource lexicon restricted to our units.
  tools/fst/prepare_dict.py $unit_file ${data}/resource_aishell/lexicon.txt \
    data/local/dict/lexicon.txt
  # 7.2 Train lm
  lm=data/local/lm
  mkdir -p $lm
  # Keep only transcripts for utterances present in the training set.
  tools/filter_scp.pl data/train/text \
    $data/data_aishell/transcript/aishell_transcript_v0.8.txt > $lm/text
  local/aishell_train_lms.sh
  # 7.3 Build decoding TLG
  tools/fst/compile_lexicon_token_fst.sh \
    data/local/dict data/local/tmp data/local/lang
  tools/fst/make_tlg.sh data/local/lm data/local/lang data/lang_test || exit 1;
  # 7.4 Decoding with runtime
  chunk_size=-1
  ./tools/decode.sh --nj 16 \
    --beam 15.0 --lattice_beam 7.5 --max_active 7000 \
    --blank_skip_thresh 0.98 --ctc_weight 0.5 --rescoring_weight 1.0 \
    --chunk_size $chunk_size \
    --fst_path data/lang_test/TLG.fst \
    --dict_path data/lang_test/words.txt \
    data/test/wav.scp data/test/text $dir/final.zip \
    data/lang_test/units.txt $dir/lm_with_runtime
  # Please see $dir/lm_with_runtime for wer
fi

# Optionally, you can decode with k2 hlg
if [ ${stage} -le 8 ] && [ ${stop_stage} -ge 8 ]; then
  if [ ! -f data/local/lm/lm.arpa ]; then
    # BUGFIX: the original 'echo "..." || exit 1' never exited because echo
    # always succeeds, so decoding ran without the LM. Abort explicitly, with
    # the diagnostic on stderr.
    echo "Please run prepare dict and train lm in Stage 7" >&2
    exit 1
  fi

  # 8.1 Build decoding HLG (rebuild if any required artifact is missing)
  required="data/local/hlg/HLG.pt data/local/hlg/words.txt"
  for f in $required; do
    if [ ! -f $f ]; then
      tools/k2/make_hlg.sh data/local/dict/ data/local/lm/ data/local/hlg
      break
    fi
  done

  # 8.2 Decode using HLG
  decoding_chunk_size=
  lm_scale=0.7
  decoder_scale=0.1
  r_decoder_scale=0.7
  decode_modes="hlg_onebest hlg_rescore"
  # $decode_modes is intentionally unquoted: each mode becomes its own
  # argument to --modes.
  python wenet/bin/recognize.py --gpu 0 \
    --modes $decode_modes \
    --config $dir/train.yaml \
    --data_type $data_type \
    --test_data data/test/data.list \
    --checkpoint $decode_checkpoint \
    --beam_size 10 \
    --batch_size 16 \
    --penalty 0.0 \
    --dict $dict \
    --word data/local/hlg/words.txt \
    --hlg data/local/hlg/HLG.pt \
    --lm_scale $lm_scale \
    --decoder_scale $decoder_scale \
    --r_decoder_scale $r_decoder_scale \
    --result_dir $dir \
    ${decoding_chunk_size:+--decoding_chunk_size $decoding_chunk_size}
  # Score each mode's hypotheses against the reference transcripts.
  for mode in ${decode_modes}; do
    python tools/compute-wer.py --char=1 --v=1 \
      data/test/text $dir/$mode/text > $dir/$mode/wer
  done
fi

# Optionally, you can train with LF-MMI using k2
# Based on 20210601_u2++_conformer_exp/final.pt, we train 50 epochs with 1e-5 lr
# and average 10 best models, achieve 4.11 cer with hlg decoding
# Actually, you can achieve even lower cer by tuning lm_scale/decoder_scale/r_decoder_scale
if [ ${stage} -le 9 ] && [ ${stop_stage} -ge 9 ]; then
  # 9.1 Build token level bigram fst for LF-MMI training
  tools/k2/prepare_mmi.sh data/train/ data/dev data/local/lfmmi

  # 9.2 Run LF-MMI training from stage 4, with below new args
  # --lfmmi_dir data/local/lfmmi

  # 9.3 Run HLG decode from stage 8.2
fi
