#!/bin/bash

# Copyright 2019 Mobvoi Inc. All Rights Reserved.
# Batch decoding + WER-scoring driver: fans one recognition job per test set
# out across GPUs (see run_process below), then scores each decode mode.
. ./path.sh || exit 1;


# Define the test datasets and their corresponding testset names
# test_data_N / testset_name_N pairs are consumed positionally by the
# `run_process N ...` calls at the bottom of this script.
test_data_0="/apdcephfs_qy3/share_976139/users/kaixunhuang/buf/test_dir/test_20250429_wenetspeech_test_net.jsonl"
testset_name_0="testnet_test"
test_data_1="/apdcephfs_qy3/share_976139/users/kaixunhuang/buf/test_dir/test_aishell4.jsonl"
testset_name_1="aishell4_test"
test_data_2="/apdcephfs_qy3/share_976139/users/kaixunhuang/buf/test_dir/test_en_gigaspeech.jsonl"
testset_name_2="gigaspeech_test"
# NOTE(review): this last set uses a data.list file rather than jsonl —
# confirm convert_jsonl_to_text.py in run_process handles that format.
test_data_3="/apdcephfs_qy3/share_976139/users/xuelonggeng/data/asr_test/aishell_test/data.list"
testset_name_3="aishell1_test"

# Additional test sets: test_data_list[i] pairs with testset_name_list[i].
# NOTE(review): this block was originally written in Python list syntax
# ("name=[ "a", "b" ]"), which is invalid in bash — it assigned the literal
# "[" to the variable and then executed each element line as a command.
# Rewritten as proper bash arrays with the same values.
test_data_list=(
    "/apdcephfs_qy3/share_976139/users/xuelonggeng/data/asr_test/librispeechh_clean/test_20220407_librispeech_clean_org_fbankhires.jsonl"
    "/teaspeech_ceph/share_976139/users/kaixunhuang/buf/test_dir/test_20220407_librispeech_other_org_fbankhires_local.jsonl"
    "/teaspeech_ceph/share_976139/users/kaixunhuang/buf/test_dir/test_20211229_wenetspeech_meeting_org_fbankhires.jsonl"
    "/teaspeech_ceph/share_976139/users/kaixunhuang/buf/test_dir/test_20241119_aishell2_android.jsonl"
    "/teaspeech_ceph/share_976139/users/kaixunhuang/buf/test_dir/test_20241119_aishell2_ios.jsonl"
    "/teaspeech_ceph/share_976139/users/kaixunhuang/buf/test_dir/test_20241119_aishell2_mic.jsonl"
)
testset_name_list=(
    "librispeech_clean"
    "librispeech_other"
    "wenetspeech_meeting"
    "aishell2_android"
    "aishell2_ios"
    "aishell2_mic"
)

# Decoding hyper-parameters.
data_type="raw"
ctc_weight=0.3
reverse_weight=0.5
decoding_chunk_size=-1   # -1 = full-utterance (non-streaming) decoding
# All supported modes:
#   ctc_greedy_search ctc_prefix_beam_search attention attention_rescoring
# Only attention_rescoring is evaluated in this run. (The original script
# assigned decode_modes twice; the first value was dead and has been removed.)
decode_modes="attention_rescoring"
# Checkpoint directory of the model under evaluation. An earlier experiment
# directory (.../fsq_from_stage1_step_40499_multi_node33_120W_stage2) was
# assigned and immediately overwritten; kept here only as a comment.
dir=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_stage1_step_40499_multi_node33_120W_stage3_causal
decode_checkpoint="$dir/step_19499.pt"
# Create result directory
mkdir -p "$dir"

echo "$decode_checkpoint"
echo "Decoding started..."
# Define the function to run the process for each GPU
run_process() {
  local gpu_id=$1
  local test_data=$2
  local testset_name=$3
  local result_dir="$dir/$testset_name"
  
  mkdir -p $result_dir
  
  # Run the inference with nohup in the background
  nohup python wenet/bin/recognize.py --gpu $gpu_id \
    --modes $decode_modes \
    --config $dir/train.yaml \
    --data_type $data_type \
    --test_data $test_data \
    --checkpoint $decode_checkpoint \
    --beam_size 10 \
    --batch_size 64 \
    --blank_penalty 0.0 \
    --ctc_weight $ctc_weight \
    --reverse_weight $reverse_weight \
    --result_dir $result_dir \
    ${decoding_chunk_size:+--decoding_chunk_size $decoding_chunk_size} > "log/test_${gpu_id}.log" 2>&1 &

  # # After the inference is done, process WER calculations

  wait  # Ensure the inference process completes before proceeding

  tmp_text="./tmp_text_$testset_name"
  python tools/convert_jsonl_to_text.py $test_data $tmp_text
  for mode in ${decode_modes}; do
    python tools/compute-wer.py --char=1 --v=1 \
      $tmp_text $result_dir/$mode/text > $result_dir/$mode/wer
    echo "wer of $mode for $testset_name"
    tail -n 15 $result_dir/$mode/wer
  done
  # Clean up temporary text file
  rm $tmp_text
}
# Additional test sets; test_data_list[i] is scored under testset_name_list[i].
test_data_list=()
test_data_list+=("/apdcephfs_qy3/share_976139/users/xuelonggeng/data/asr_test/librispeechh_clean/test_20220407_librispeech_clean_org_fbankhires.jsonl")
test_data_list+=("/teaspeech_ceph/share_976139/users/kaixunhuang/buf/test_dir/test_20220407_librispeech_other_org_fbankhires_local.jsonl")
test_data_list+=("/teaspeech_ceph/share_976139/users/kaixunhuang/buf/test_dir/test_20211229_wenetspeech_meeting_org_fbankhires.jsonl")
test_data_list+=("/teaspeech_ceph/share_976139/users/kaixunhuang/buf/test_dir/test_20241119_aishell2_android.jsonl")
test_data_list+=("/teaspeech_ceph/share_976139/users/kaixunhuang/buf/test_dir/test_20241119_aishell2_ios.jsonl")
test_data_list+=("/teaspeech_ceph/share_976139/users/kaixunhuang/buf/test_dir/test_20241119_aishell2_mic.jsonl")

# Human-readable names, parallel to test_data_list above.
testset_name_list=()
testset_name_list+=("librispeech_clean")
testset_name_list+=("librispeech_other")
testset_name_list+=("wenetspeech_meeting")
testset_name_list+=("aishell2_android")
testset_name_list+=("aishell2_ios")
testset_name_list+=("aishell2_mic")
# Fan the test sets out across GPUs; each run_process is backgrounded so all
# jobs start concurrently.
# NOTE(review): GPU ids 0 and 1 each host two simultaneous jobs (the first
# two calls and the last two) — confirm the cards have memory for two
# decoders at once.
run_process 0 "$test_data_0" "$testset_name_0" &
run_process 1 "$test_data_1" "$testset_name_1" &
run_process 2 "$test_data_2" "$testset_name_2" &
run_process 3 "$test_data_3" "$testset_name_3" &
run_process 4 "${test_data_list[0]}" "${testset_name_list[0]}" &  # librispeech_clean
run_process 5 "${test_data_list[1]}" "${testset_name_list[1]}" &  # librispeech_other
run_process 6 "${test_data_list[2]}" "${testset_name_list[2]}" &  # wenetspeech_meeting
run_process 7 "${test_data_list[3]}" "${testset_name_list[3]}" &  # aishell2_android
run_process 0 "${test_data_list[4]}" "${testset_name_list[4]}" &  # aishell2_ios (shares GPU 0)
run_process 1 "${test_data_list[5]}" "${testset_name_list[5]}" &  # aishell2_mic (shares GPU 1)

# Wait for all background processes to finish
wait