#!/usr/bin/env python3
# Copyright      2022-2023  Xiaomi Corp.        (authors: Fangjun Kuang,
#                                                         Zengwei Yao)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script loads a checkpoint and uses it to decode waves.
You can generate the checkpoint with the following command:

- For non-streaming model:

./zipformer/export.py \
  --exp-dir ./zipformer/exp \
  --use-ctc 1 \
  --tokens data/lang_bpe_500/tokens.txt \
  --epoch 30 \
  --avg 9 \
  --jit 1

- For streaming model:

./zipformer/export.py \
  --exp-dir ./zipformer/exp \
  --use-ctc 1 \
  --causal 1 \
  --tokens data/lang_bpe_500/tokens.txt \
  --epoch 30 \
  --avg 9 \
  --jit 1

Usage of this script:

./zipformer/jit_pretrained_ctc.py \
  --model-filename ./zipformer/exp/jit_script.pt \
  --tokens data/lang_bpe_500/tokens.txt \
  --sample-rate 16000 \
  --input_wav_scp data/wav.scp \
  --output_text ./results.txt

It performs CTC greedy-search decoding for every wave file listed in
--input_wav_scp (a Kaldi-style "utt-id wav-path" scp file) and writes one
"utt-id transcript" line per utterance to --output_text.
"""

import argparse
import logging
import math
from typing import List

import k2


import kaldifeat
import torch
import torchaudio
from ctc_decode import get_decoding_params
from export import num_tokens
from torch.nn.utils.rnn import pad_sequence
from train import get_params

from gxl_ai_utils.utils import utils_file

def _str2bool(v) -> bool:
    """Interpret common textual boolean values ("true", "1", "yes", ...)."""
    return str(v).strip().lower() in ("1", "true", "t", "yes", "y")


def get_parser():
    """Return the command-line argument parser for this decoding script."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument(
        "--model-filename",
        type=str,
        required=True,
        help="Path to the torchscript model.",
    )

    parser.add_argument(
        "--tokens",
        type=str,
        help="""Path to tokens.txt.
        Used only when method is ctc-decoding.
        """,
    )

    parser.add_argument(
        "--sample-rate",
        type=int,
        default=16000,
        help="The sample rate of the input sound file",
    )

    parser.add_argument(
        "--input_wav_scp",
        type=str,
        help="Kaldi-style wav.scp file: one '<utt-id> <wav-path>' per line.",
    )
    parser.add_argument(
        "--output_text",
        type=str,
        help="Output transcript file: one '<utt-id> <text>' per line.",
    )
    parser.add_argument(
        "--debug",
        # BUG FIX: the original used `type=bool`, for which argparse treats
        # every non-empty string (including "False") as True.  Parse the
        # string explicitly so `--debug False` / `--debug 0` actually
        # disable debugging, while the old `--debug True` keeps working.
        type=_str2bool,
        default=False,
        help="Whether to print intermediate tensors for debugging.",
    )

    return parser


def read_sound_files(
    filenames: List[str], expected_sample_rate: float = 16000
) -> List[torch.Tensor]:
    """Load each file in *filenames* as a 1-D float32 torch tensor.

    Only the first channel of each file is kept.

    Args:
      filenames:
        A list of sound filenames.
      expected_sample_rate:
        The expected sample rate of the sound files; an AssertionError is
        raised for any file whose rate differs.
    Returns:
      Return a list of 1-D float32 torch tensors.
    """
    waves = []
    for path in filenames:
        samples, rate = torchaudio.load(path)
        assert rate == expected_sample_rate, (
            f"Expected sample rate: {expected_sample_rate}. Given: {rate}"
        )
        # Keep channel 0 only; contiguous() so later ops can view the data.
        waves.append(samples[0].contiguous())
    return waves

def greedy_decode(ctc_output, blank_index):
    """
    Perform greedy (best-path) search decoding on CTC output.

    Standard CTC collapsing is applied: consecutive repeated tokens are
    merged first, then blanks are removed, so "a <blank> a" decodes to
    two tokens "a a".

    Args:
        ctc_output (torch.Tensor): CTC output tensor of shape
            (batch_size, T, vocab_size).
        blank_index (int): Index of the blank character in the vocabulary.

    Returns:
        List[List[int]]: one decoded token-id sequence per batch item.
    """
    batch_size = ctc_output.shape[0]
    decoded_sequences = []

    for batch in range(batch_size):
        # Most probable token index at each time step.
        max_indices = ctc_output[batch].argmax(dim=1).tolist()

        decoded_sequence = []
        prev = None
        for char_index in max_indices:
            # Emit only when non-blank AND different from the previous
            # frame's token (repeat collapsing).
            if char_index != blank_index and char_index != prev:
                decoded_sequence.append(char_index)
            # BUG FIX: update `prev` on EVERY frame, including blanks.
            # The original only updated it on non-blank frames, which
            # wrongly merged "a <blank> a" into a single "a".
            prev = char_index

        decoded_sequences.append(decoded_sequence)

    return decoded_sequences

@torch.no_grad()
def main():
    """Decode every wave listed in --input_wav_scp with a torchscript
    zipformer CTC model and write one '<utt-id> <text>' line per utterance
    to --output_text.

    Decoding is CTC greedy search on the model's ctc_activation output.
    """
    parser = get_parser()
    args = parser.parse_args()

    params = get_params()
    # add decoding params
    params.update(get_decoding_params())
    params.update(vars(args))
    is_debug = params.debug
    print('isdebug:', is_debug)

    token_table = k2.SymbolTable.from_file(params.tokens)
    # +1 accounts for the blank symbol.
    params.vocab_size = num_tokens(token_table) + 1

    logging.info(f"{params}")

    # CPU-only decoding; switch to CUDA here if desired.
    device = torch.device("cpu")
    logging.info(f"device: {device}")

    model = torch.jit.load(args.model_filename)
    model.to(device)
    model.eval()

    logging.info("Constructing Fbank computer")
    # NOTE(review): this kaldifeat Fbank is constructed but never used below
    # (features come from torchaudio.compliance.kaldi.fbank instead).  Kept
    # to preserve the original behavior; candidate for removal.
    opts = kaldifeat.FbankOptions()
    opts.device = device
    opts.frame_opts.dither = 0
    opts.frame_opts.snip_edges = False
    opts.frame_opts.samp_freq = params.sample_rate
    opts.mel_opts.num_bins = params.feature_dim
    opts.mel_opts.high_freq = -400
    fbank = kaldifeat.Fbank(opts)

    wav_path_dict = utils_file.load_dict_from_scp(params.input_wav_scp)
    expected_sample_rate = 16000
    logging.info("Decoding started")
    num_decoded_num = 0
    # Context manager guarantees the transcript file is flushed and closed
    # even if decoding of some utterance raises.
    with open(args.output_text, 'w', encoding='utf-8') as output_f:
        for key, wav_path in wav_path_dict.items():
            wave, sample_rate = torchaudio.load(wav_path)
            assert (
                sample_rate == expected_sample_rate
            ), f"Expected sample rate: {expected_sample_rate}. Given: {sample_rate}"
            # We use only the first channel
            res_wav = wave[0].contiguous().to(device)
            if is_debug:
                print('----------------wav-----------------------start')
                print("wav shape:\n ", res_wav.shape)
                print("wav 前20个值\n ", res_wav[:20])
                print('----------------wav----------end')
            # 80-dim fbank, matching the training frontend (no dither,
            # snip_edges=False, high_freq=-400).
            fbank_one = torchaudio.compliance.kaldi.fbank(
                res_wav.unsqueeze(0),
                snip_edges=False,
                high_freq=-400,
                num_mel_bins=80,
                dither=0,
            ).to(device)
            if is_debug:
                print('----------------fbank----------start')
                print("fbank shape:\n ", fbank_one.shape)
                print("fbank 第一帧\n ", fbank_one[0])
                print('----------------fbank----------end')

            # Batch of one; padding value matches log(epsilon) used upstream.
            features = pad_sequence(
                [fbank_one], batch_first=True, padding_value=math.log(1e-10)
            )
            encoder_out, _, _ = model.encoder.forward_chunk(features)
            ctc_output = model.ctc_activation(encoder_out)  # (N, T, C)
            if is_debug:
                print('----------------ctc_output_old----------start')
                print("ctc_output shape:\n ", ctc_output.shape)
                print('打印第一帧ctc_output的最后20个值', ctc_output[0, 0, -20:])
                print('----------------ctc_output_old----------end')

            token_ids = greedy_decode(ctc_output, blank_index=0)
            hyps = [[token_table[i] for i in ids] for ids in token_ids]
            for hyp in hyps:
                # BPE pieces use "▁" as the word separator.
                pieces = [char.replace("▁", " ").strip() for char in hyp]
                res_item_str = "".join(pieces)
                print(f'{num_decoded_num}/{len(wav_path_dict)} {key} {res_item_str}')
                output_f.write(f'{key} {res_item_str}\n')
            num_decoded_num += 1

if __name__ == "__main__":
    formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"

    logging.basicConfig(format=formatter, level=logging.INFO)
    main()
