#!/bin/bash

# Copyright 2019 Mobvoi Inc. All Rights Reserved.
# Decoding/scoring driver for a WeNet EN/CN ASR experiment (only stage 5 is
# present in this file). Sources the environment, then runs from the parent
# directory so that wenet/ and tools/ paths below resolve.
. ./path.sh || exit 1;
cd ../
# Controls which/how many GPUs are used: a single id gives 1-GPU training,
# several ids enable multi-GPU DDP training in PyTorch.
#export CUDA_VISIBLE_DEVICES="2"

stage=5 # start from 0 if you need to start from data preparation
stop_stage=5

# Experiment workspace holding configs and data lists (site-specific NFS path).
work_dir=/home/work_nfs7/xlgeng/new_workspace/wenet_gxl_en_cn/examples/aishell/en_cn
nj=16  # number of parallel jobs (not referenced by the visible stage-5 block)
# Mixed English/Chinese modelling-unit dictionary.
dict=$work_dir/data_list/units_en_cn.txt
#dict=data/units_paraformer.txt
bpe_model=$work_dir/data_list/en_cn_bpe.model  # sentencepiece model for BPE units

data_type=shard  # training data format (decoding below passes --data_type raw)

train_config=$work_dir/conf/train_whisper_medium_streaming.yaml
cmvn=false
# Experiment/output directory containing train.yaml and the checkpoints.
dir=/home/work_nfs6/xlgeng/new_workspace/wenet_gxl_en_cn/streaming_fbank_exp

checkpoint=      # optional checkpoint to resume from (empty = start fresh)
num_workers=8    # dataloader worker processes
prefetch=500     # dataloader prefetch factor

# use average_checkpoint will get better result
average_checkpoint=false
decode_checkpoint=$dir/epoch_9.pt  # checkpoint used for decoding below
decode_checkpoint_name=9pt         # tag embedded in the test output dir name
average_num=30                     # checkpoints to average (when enabled)
# Space-separated list; expanded into multiple recognize.py modes and looped
# over for per-mode WER scoring in stage 5.
decode_modes="ctc_greedy_search ctc_prefix_beam_search attention attention_rescoring"

train_engine=torch_ddp

deepspeed_config=conf/ds_stage2.json
deepspeed_save_states="model_only"

# Lets any variable above be overridden from the command line,
# e.g. --stage 5 --decode_checkpoint /path/to/ckpt.pt
. tools/parse_options.sh || exit 1;

# All known evaluation sets; test_sets selects the subset actually decoded.
test_sets_all=("aishell1" "aishell2" "SPEECHIO_ASR_ZH00000" "SPEECHIO_ASR_ZH00001" "SPEECHIO_ASR_ZH00002" "SPEECHIO_ASR_ZH00003" "SPEECHIO_ASR_ZH00004" "test_meeting" "test_net")
test_sets=("aishell2" )

if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
  # Stage 5: decode each selected test set with recognize.py, undo BPE on the
  # hypotheses, then score WER per decoding mode.
  cmvn_opts=
  decoding_chunk_size=
  ctc_weight=0.5
  # NOTE(review): the original comment said "Polling GPU id begin with index 0",
  # but gpu_id is hard-coded to 1 inside the loop, so every test set decodes on
  # the same GPU — confirm whether round-robin GPU assignment was intended.
  for test_set in "${test_sets[@]}"; do
  {
    echo "test this dataset: $test_set"
    test_dir=$dir/test_${decode_checkpoint_name}/${test_set}
    wer_path=$test_dir/wer
    # Resume support: skip any test set whose WER file already exists.
    if [ -e "$wer_path" ]; then
      echo "$wer_path 文件已存在，跳过对该数据集的推理"
      continue
    fi
    mkdir -p "$test_dir"
    gpu_id=1
    export CUDA_VISIBLE_DEVICES="$gpu_id"
    # $decode_modes is deliberately unquoted so each mode becomes a separate
    # argument to --mode; recognize.py writes one result sub-directory per mode.
    # (Fixed: the command previously ended with a dangling "\" that continued
    # it onto a blank line — any line added there would have silently become an
    # extra recognize.py argument.)
    python wenet/bin/recognize.py --gpu "$gpu_id" \
      --mode $decode_modes \
      --config "$dir/train.yaml" \
      --data_type raw \
      --test_data "$work_dir/data_list/test/$test_set/data.list" \
      --checkpoint "$decode_checkpoint" \
      --beam_size 10 \
      --batch_size 1 \
      --penalty 0.0 \
      --result_dir "$test_dir" \
      --ctc_weight "$ctc_weight"

    # Post-process each decoding mode: split keys/values, decode BPE pieces
    # back to words ("▁" marks word boundaries), then compute WER.
    big_test_dir=$test_dir
    for mode in ${decode_modes}; do
      test_dir=$big_test_dir/$mode
      cp "$test_dir/text" "$test_dir/text_bpe"
      cut -f2- -d " " "$test_dir/text_bpe" > "$test_dir/text_bpe_value_tmp"
      cut -f1 -d " " "$test_dir/text_bpe" > "$test_dir/text_bpe_key_tmp"
      tools/spm_decode --model=/home/work_nfs7/yhliang/wenet-main/examples/aishell/s0/data/bpe/unigram2000.model --input_format=piece \
        < "$test_dir/text_bpe_value_tmp" | sed -e "s/▁/ /g" > "$test_dir/text_value_tmp"
      paste -d " " "$test_dir/text_bpe_key_tmp" "$test_dir/text_value_tmp" > "$test_dir/text"
      # NOTE(review): the reference transcript uses the relative path
      # data_list/test/... while the hypothesis side used $work_dir/data_list/...
      # — confirm both resolve to the same directory after the 'cd ../' above.
      python tools/compute-wer.py --char=1 --v=1 \
        data_list/test/$test_set/text "$test_dir/text" > "$test_dir/wer"
      tail -n 6 "$test_dir/wer"
    done

    echo "$test_set has been decoded!"
  }
  done
  # NOTE(review): the { ... } group above is NOT backgrounded with '&', so test
  # sets run serially and this 'wait' is a no-op. Change '}' to '} &' (with
  # distinct gpu_ids) if parallel decoding across GPUs is actually intended.
  wait

fi
