#!/bin/bash

# Copyright 2019 Mobvoi Inc. All Rights Reserved.
. ./path.sh || exit 1;


# GPU IDs to use for inference (edit to match the machine, e.g. "2 5 7").
# Jobs pick their GPU via a zero-based index into this list.
GPU_IDS=(0 1 2 3 4 5 6 7)
# Number of GPUs == number of parts the test set is split into.
GPU_NUM=${#GPU_IDS[@]}

# When true, skip the inference step (scoring-only reruns).
# shellcheck disable=SC2034
NO_INFER=false

# split_data: split the test data into GPU_NUM roughly equal parts.
# Globals:   GPU_NUM (read)
# Arguments: $1 - test data file (one sample per line)
#            $2 - result directory (split files go under $2/splits)
#            $3 - testset name (currently unused; kept for call-site compatibility)
# Outputs:   prints the split directory path to stdout
split_data() {
  local test_data=$1
  local result_dir=$2
  local testset_name=$3

  local split_dir="$result_dir/splits"
  mkdir -p "$split_dir"

  # Total line count, divided by GPU_NUM rounding up so parts differ by at
  # most one line. 'local' keeps these from leaking into the global scope.
  local total_lines lines_per_part
  total_lines=$(wc -l < "$test_data")
  lines_per_part=$(( (total_lines + GPU_NUM - 1) / GPU_NUM ))
  # Guard: 'split -l 0' is an error; with 1, an empty input just yields no parts.
  if [ "$lines_per_part" -lt 1 ]; then
    lines_per_part=1
  fi

  # -d gives numeric two-digit suffixes (part_00, part_01, ...), matching the
  # names run_process reconstructs with printf '%02d'.
  split -l "$lines_per_part" -d "$test_data" "$split_dir/part_"
  # Return the directory with split files (captured by the caller via $()).
  echo "$split_dir"
}
# run_process: launch one inference job per GPU, wait, merge per-part results
# per decode mode, and compute WER against the reference transcript.
# Globals (read): GPU_IDS, GPU_NUM, NO_INFER, dir, data_type, decode_checkpoint,
#                 ctc_weight, reverse_weight, decode_modes, decoding_chunk_size
# Arguments: $1 - test data file, $2 - testset name, $3 - result directory
run_process() {
  local test_data=$1
  local testset_name=$2
  local result_dir=$3

  echo "开始推理, 测试集: $testset_name, 测试数据: $test_data, 结果目录: $result_dir"
  echo "开始切割测试集（切割份数=${GPU_NUM}，与GPU数量一致）"
  # Declaration split from assignment so a failure inside $() is not masked.
  local split_dir
  split_dir=$(split_data "$test_data" "$result_dir" "$testset_name")
  echo "切割完成, 切割结果: $split_dir"

  local now_str log_dir
  now_str=$(date +"%F_%H%M%S_%3N" | sed 's/-/_/g')
  log_dir="log/infer/$testset_name/$now_str"
  mkdir -p "$log_dir"

  # One job per GPU: index 0..GPU_NUM-1 maps into GPU_IDS.
  local index used_gpu split_file split_result_dir log_file
  for index in $(seq 0 $((GPU_NUM - 1))); do
    used_gpu=${GPU_IDS[$index]}
    split_file="$split_dir/part_$(printf "%02d" "$index")"
    split_result_dir="$result_dir/part_$(printf "%02d" "$index")"
    mkdir -p "$split_result_dir"
    echo "开始推理, part $index 分割文件: $split_file, 使用GPU: $used_gpu, 结果目录: $split_result_dir"

    if [ "$NO_INFER" = true ]; then
      echo "NO_INFER=true，跳过该分片推理"
      continue
    fi

    log_file="$log_dir/test_gpu${used_gpu}.log"

    # Build the command once; the two launch branches below differ only in
    # where stdout/stderr go. $decode_modes is a space-separated list and
    # must word-split into separate arguments.
    # shellcheck disable=SC2086
    local -a cmd=(python wenet/bin/recognize.py --gpu "$used_gpu"
      --modes $decode_modes
      --config "$dir/train.yaml"
      --data_type "$data_type"
      --test_data "$split_file"
      --checkpoint "$decode_checkpoint"
      --beam_size 10
      --batch_size 16
      --blank_penalty 0.0
      --ctc_weight "$ctc_weight"
      --reverse_weight "$reverse_weight"
      --result_dir "$split_result_dir"
      ${decoding_chunk_size:+--decoding_chunk_size "$decoding_chunk_size"})

    if [ "$index" -eq 0 ]; then
      # First job: mirror its output to the terminal via tee as a progress view.
      nohup "${cmd[@]}" 2>&1 | tee "$log_file" &
    else
      # Remaining jobs: log file only.
      nohup "${cmd[@]}" > "$log_file" 2>&1 &
    fi
  done

  # Barrier: wait for all background inference jobs to finish.
  wait

  # Build the reference transcript used for scoring.
  local tmp_text="./tmp_text_$testset_name"
  python tools/convert_jsonl_to_text.py "$test_data" "$tmp_text"

  # Merge the per-part hypotheses of each decode mode, then compute WER.
  local mode now_result_dir merged_text
  for mode in ${decode_modes}; do
    now_result_dir="$result_dir/$mode"
    mkdir -p "$now_result_dir"
    merged_text="$now_result_dir/text"
    # part_* must stay unquoted so the glob expands; the variables are quoted.
    cat "$result_dir"/part_*/"$mode"/text > "$merged_text"

    python tools/compute-wer.py --char=1 --v=1 "$tmp_text" "$merged_text" > "$now_result_dir/wer"
    echo "WER of $mode for $testset_name"
    tail -n 15 "$now_result_dir/wer"
  done
  rm "$tmp_text"
}



# Absolute paths of the test sets to decode. Kept in lockstep with
# testset_name_list below: same length, same order.
test_data_list=(
  "/apdcephfs_qy3/share_976139/users/xuelonggeng/data/asr_test/librispeech_clean/data.jsonl"
  "/apdcephfs_qy3/share_976139/users/xuelonggeng/data/asr_test/wenetspeech_meeting/data.jsonl"
  "/apdcephfs_qy3/share_976139/users/xuelonggeng/data/asr_test/aishell2_android/data.jsonl"
  "/apdcephfs_qy3/share_976139/users/xuelonggeng/data/asr_test/aishell2_ios/data.jsonl"
  "/apdcephfs_qy3/share_976139/users/xuelonggeng/data/asr_test/aishell2_mic/data.jsonl"
  "/apdcephfs_qy3/share_976139/users/kaixunhuang/buf/test_dir/test_20250429_wenetspeech_test_net.jsonl"
  "/apdcephfs_qy3/share_976139/users/kaixunhuang/buf/test_dir/test_aishell4.jsonl"
  "/apdcephfs_qy3/share_976139/users/kaixunhuang/buf/test_dir/test_en_gigaspeech.jsonl"
  "/apdcephfs_qy3/share_976139/users/xuelonggeng/data/asr_test/aishell_test/data.list"
  "/teaspeech_ceph/share_976139/users/kaixunhuang/buf/test_dir/test_20220407_librispeech_other_org_fbankhires_local.jsonl"
  "/apdcephfs_qy3/share_976139/users/kaixunhuang/buf/test_dir/test_20250429_fleurs_en_tn.jsonl"
  "/apdcephfs_qy3/share_976139/users/kaixunhuang/buf/test_dir/test_20250429_fleurs_zh.jsonl"
  "/apdcephfs_cq8/share_2906397/users/simonjjiang/codes/megatron_llm/Tealab_TTS/examples/version2/continuousTTS/data/common_voice_en_test/data.list"
)

# Human-readable name of each test set; index i names test_data_list[i].
testset_name_list=(
  "librispeech_clean"
  "wenetspeech_meeting"
  "aishell2_android"
  "aishell2_ios"
  "aishell2_mic"
  "testnet_test"
  "aishell4_test"
  "gigaspeech_test"
  "aishell1_test"
  "librispeech_other"
  "fleurs_en"
  "fleurs_zh"
  "common_voice_en_test"
)




# attention_rescoring ctc_prefix_beam_search ctc_greedy_search
# dir=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_stage1_step_40499_multi_node33_120W_stage3_causal
# ckpt_name="step_28499"
# dir=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_epoch1_step7199_10W_multi_node
# ckpt_name="step_40499"
# dir=/apdcephfs_qy3/share_976139/users/xuelonggeng/ckpt/asr_causal_d8
# ckpt_name=step_13499
# dir=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_epoch1_step7199_10W_multi_node
# ckpt_name=step_37499
# dir=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_stage1_step_40499_multi_node33_120W_stage2
# ckpt_name=step_8999
# dir=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_stage1_step_40499_multi_node33_120W_stage2
# ckpt_name=step_17999
# dir=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_stage1_step_40499_multi_node33_120W_stage3_causal
# ckpt_name=step_19499
# dir=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_stage1_step_40499_multi_node33_120W_stage3_causal
# ckpt_name=step_28499
# dir=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/120W_d8_causal_format_asr_fix_text_20Wjingpin
# ckpt_name=step_2999
# dir=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/120W_d8_causal_format_asr_fix_text_20Wjingpin
# ckpt_name=step_20999
# dir=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/120W_d8_causal_format_asr_fix_text_20Wjingpin
# ckpt_name=step_3999
# dir=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/120W_d8_causal_format_asr_fix_text_20Wjingpin_stage2
# ckpt_name=step_25999

# dir=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/120W_d8_causal_format_asr_fix_text_20Wjingpin_stage3_true_hq
# ckpt_name=step_41999
# Active checkpoint selection (previous runs kept above as commented history).
dir=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/120W_d8_causal_format_asr_fix_text_20Wjingpin_stage4_true_hq_with_en
ckpt_name=step_37999
# step_61999.pt
# dir=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/120W_d8_causal_format_asr_fix_text_20Wjingpin_stage3_true_hq
# ckpt_name=step_61999
# dir=/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/120W_d8_causal_format_asr_fix_text_20Wjingpin_stage4_true_hq_with_en
# ckpt_name=step_59999


# Decoding hyper-parameters consumed by run_process.
data_type="raw"
ctc_weight=0.3
reverse_weight=0.5
decoding_chunk_size=-1 # -1 = full-utterance decoding; empty disables the flag
decode_modes="ctc_greedy_search ctc_prefix_beam_search attention attention_rescoring"

# Suffix appended to each result directory so runs with different decoding
# hyper-parameters do not overwrite each other.
added_name="_decoding_chunk_size_${decoding_chunk_size}_ctc_weight_${ctc_weight}_reverse_weight_${reverse_weight}"

decode_checkpoint=$dir/$ckpt_name.pt

# Create result directory
mkdir -p "$dir"

true_dir=$dir/$ckpt_name
mkdir -p "$true_dir"

# Sanity check: the two lists must stay in lockstep, otherwise results would
# silently land under wrong (or empty) testset names.
if [ "${#test_data_list[@]}" -ne "${#testset_name_list[@]}" ]; then
  echo "ERROR: test_data_list (${#test_data_list[@]} entries) and testset_name_list (${#testset_name_list[@]} entries) differ in length" >&2
  exit 1
fi

echo "$decode_checkpoint"
echo "Decoding started..."

# Loop over the test datasets and run the inference on each of them
for i in "${!test_data_list[@]}"; do
  test_data="${test_data_list[$i]}"
  testset_name="${testset_name_list[$i]}"

  result_dir="$true_dir/$testset_name$added_name"
  mkdir -p "$result_dir"
  run_process "$test_data" "$testset_name" "$result_dir"
done
