import math
import os.path
from pathlib import Path
from typing import List, Tuple
import sys

# import k2
import torchaudio
from torch.nn.utils.rnn import pad_sequence

sys.path.insert(0, '../../')

import torch
from gxl_ai_utils.utils import utils_file
from torch import Tensor, nn
from wenet.zipformer.train import get_model
from wenet.zipformer.config import CONFIG_ZIPFORMER_PARAM
def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
    """
    Build a boolean padding mask from per-sentence lengths.

    Args:
      lengths:
        A 1-D tensor of sentence lengths.
      max_len:
        Minimum width of the mask; the actual width is
        ``max(max_len, lengths.max())``.
    Returns:
      A 2-D bool tensor of shape ``(len(lengths), width)`` where padded
      positions are ``True`` and valid positions are ``False``.
    """
    assert lengths.ndim == 1, lengths.ndim
    batch = int(lengths.size(0))
    width = int(max(max_len, lengths.max().item()))
    # Column positions broadcast against each row's length: a position is
    # masked (True) once it reaches or passes that row's valid length.
    positions = torch.arange(width, device=lengths.device)
    grid = positions.unsqueeze(0).expand(batch, width)
    return grid >= lengths.unsqueeze(-1)

class StreamingEncoderModel(nn.Module):
    """A wrapper around a zipformer ``encoder`` and its ``encoder_embed``
    frontend.

    Exposes a full-utterance ``forward``, a chunk-wise streaming
    ``forward_`` that threads cached states through calls, and
    ``get_init_states`` to build those states.
    """

    def __init__(self, encoder: nn.Module, encoder_embed: nn.Module) -> None:
        super().__init__()
        # This wrapper only supports a single chunk-size / left-context
        # configuration (no multi-setting training-style lists).
        assert len(encoder.chunk_size) == 1, encoder.chunk_size
        assert len(encoder.left_context_frames) == 1, encoder.left_context_frames
        self.chunk_size = encoder.chunk_size[0]
        self.left_context_len = encoder.left_context_frames[0]

        # The encoder_embed subsample features (T - 7) // 2
        # The ConvNeXt module needs (7 - 1) // 2 = 3 frames of right padding after subsampling
        self.pad_length = 7 + 2 * 3

        self.encoder = encoder
        self.encoder_embed = encoder_embed

    def get_output_dim(self):
        # Hard-coded output dimension; presumably matches the zipformer
        # config used by get_model — TODO confirm against CONFIG_ZIPFORMER_PARAM.
        return 768

    def output_size(self):
        """Alias of get_output_dim(), kept for wenet-style encoder APIs."""
        return self.get_output_dim()

    def forward(
        self, features: Tensor, feature_lengths: Tensor
    ) -> Tuple[Tensor, Tensor]:
        """Non-streaming (full-utterance) forward.

        Args:
            features: (N, T, C)
            feature_lengths: (N,)
        Returns:
            encoder_out: (N, T', C') encoder outputs
            encoder_out_lens: (N,) valid lengths of encoder_out
        """
        # import pdb;pdb.set_trace()
        # print("features: ", features.shape, "feature_lengths:", feature_lengths)
        x, x_lens = self.encoder_embed(features, feature_lengths)
        # print("encoder_out: ", x.shape, "x_lens:", x_lens)
        src_key_padding_mask = make_pad_mask(x_lens)
        # print("src_key_padding_mask: ", src_key_padding_mask.shape)
        x = x.permute(1, 0, 2)  # (N, T, C) -> (T, N, C)
        encoder_out, encoder_out_lens = self.encoder(x, x_lens, src_key_padding_mask)
        # print("encoder_out: ", encoder_out.shape, "encoder_out_lens:", encoder_out_lens)
        encoder_out = encoder_out.permute(1, 0, 2)  # (T, N, C) ->(N, T, C)
        # print("encoder_out: ", encoder_out.shape)
        return encoder_out, encoder_out_lens

    def forward_(
        self, features: Tensor, feature_lengths: Tensor, states: List[Tensor]
    ) -> Tuple[Tensor, Tensor, List[Tensor]]:
        """Streaming forward for encoder_embed and encoder.

        Args:
            features: (N, T, C)
            feature_lengths: (N,)
            states: a list of Tensors as produced by get_init_states()
                (per-layer encoder caches, then the encoder_embed left-pad
                cache at [-2] and processed_lens at [-1])

        Returns encoder outputs, output lengths, and updated states.
        """
        # import pdb; pdb.set_trace()
        chunk_size = self.chunk_size
        left_context_len = self.left_context_len

        # states[-2] is the cached left padding of the ConvNeXt frontend.
        cached_embed_left_pad = states[-2]
        x, x_lens, new_cached_embed_left_pad = self.encoder_embed.streaming_forward(
            x=features,
            x_lens=feature_lengths,
            cached_left_pad=cached_embed_left_pad,
        )
        # assert x.size(1) == chunk_size, (x.size(1), chunk_size)

        src_key_padding_mask = make_pad_mask(x_lens)

        # processed_mask is used to mask out initial states
        processed_mask = torch.arange(left_context_len, device=x.device).expand(
            x.size(0), left_context_len
        )
        processed_lens = states[-1]  # (batch,)
        # (batch, left_context_size)
        # flip(1) puts the most recently processed frames nearest the chunk.
        processed_mask = (processed_lens.unsqueeze(1) <= processed_mask).flip(1)
        # Update processed lengths
        new_processed_lens = processed_lens + x_lens

        # (batch, left_context_size + chunk_size)
        src_key_padding_mask = torch.cat([processed_mask, src_key_padding_mask], dim=1)

        x = x.permute(1, 0, 2)  # (N, T, C) -> (T, N, C)
        encoder_states = states[:-2]

        (
            encoder_out,
            encoder_out_lens,
            new_encoder_states,
        ) = self.encoder.streaming_forward(
            x=x,
            x_lens=x_lens,
            states=encoder_states,
            src_key_padding_mask=src_key_padding_mask,
        )
        encoder_out = encoder_out.permute(1, 0, 2)  # (T, N, C) ->(N, T, C)

        # Reassemble the state list in the same layout get_init_states() used.
        new_states = new_encoder_states + [
            new_cached_embed_left_pad,
            new_processed_lens,
        ]
        return encoder_out, encoder_out_lens, new_states

    def forward_2(
        self, features: Tensor, feature_lengths: Tensor, states: List[Tensor]
    ) -> Tuple[Tensor, Tensor]:
        """Hybrid forward: streaming encoder_embed + non-streaming encoder.

        Args:
            features: (N, T, C)
            feature_lengths: (N,)
            states: a list of Tensors; only states[-2] (embed left-pad
                cache) is consumed, and no updated states are returned.

        NOTE(review): unlike forward_, the output is NOT permuted back to
        (N, T, C) and chunk_size / left_context_len are computed but
        unused — confirm this is intentional before relying on it.
        """
        chunk_size = self.chunk_size
        left_context_len = self.left_context_len

        cached_embed_left_pad = states[-2]
        x, x_lens, new_cached_embed_left_pad = self.encoder_embed.streaming_forward(
            x=features,
            x_lens=feature_lengths,
            cached_left_pad=cached_embed_left_pad,
        )
        src_key_padding_mask = make_pad_mask(x_lens)
        x = x.permute(1, 0, 2)  # (N, T, C) -> (T, N, C)
        encoder_out, encoder_out_lens = self.encoder(x, x_lens, src_key_padding_mask)
        return encoder_out, encoder_out_lens

    def forward_chunk(
        self,
        xs: torch.Tensor,
        state: List[torch.Tensor],
        offset: int=0,
        required_cache_size: int=1,
        att_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
        cnn_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
        att_mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
    ) -> Tuple[Tensor, Tensor, Tensor, List[Tensor]]:
        """wenet-style chunk entry point delegating to forward_().

        NOTE(review): offset, required_cache_size, att_cache, cnn_cache and
        att_mask exist only for interface compatibility and are ignored;
        the two zero tensors in the return fill the corresponding wenet
        cache slots. Iterating xs (N, T, C) yields (T, C) slices, so
        x.size(0) is the per-utterance frame count.
        """
        feature_lengths = torch.tensor([x.size(0) for x in xs], dtype=torch.long, device=xs.device)
        # x, x_lens = self.encoder_embed(features, feature_lengths)
        # x = torch.randn(1, 13, 768,device=xs.device)
        # x_lens = torch.tensor([13],dtype=torch.long, device=xs.device)
        # src_key_padding_mask = make_pad_mask(x_lens)
        # x = x.permute(1, 0, 2)  # (N, T, C) -> (T, N, C)
        # encoder_out, encoder_out_lens = self.encoder(x, x_lens, src_key_padding_mask)
        # encoder_out, encoder_out_lens = self.encoder(x, x_lens)
        # encoder_out = encoder_out.permute(1, 0, 2)  # (T, N, C) ->(N, T, C)
        # state = self.get_init_states(feature_lengths.size(0), xs.device)
        encoder_out, encoder_out_lens, new_states = self.forward_(xs, feature_lengths, state)
        return encoder_out, torch.zeros(1, 1, 1), torch.zeros(1, 1, 1), new_states


    # def forward_chunk(
    #     self,
    #     xs: torch.Tensor,
    #     offset: int,
    #     required_cache_size: int,
    #     att_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
    #     cnn_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
    #     att_mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
    # ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    #     states = self.encoder.get_init_states(device=xs.device)
    #     features = xs
    #     feature_lengths = torch.tensor([x.size(1) for x in xs], dtype=torch.long, device=xs.device)
    #     cached_embed_left_pad = states[-2]
    #     x, x_lens, new_cached_embed_left_pad = self.encoder_embed.streaming_forward(
    #         x=features,
    #         x_lens=feature_lengths,
    #         cached_left_pad=cached_embed_left_pad,
    #     )
    #     # x, x_lens = self.encoder_embed(features, feature_lengths)
    #     # src_key_padding_mask = make_pad_mask(x_lens)
    #     # x = x.permute(1, 0, 2)  # (N, T, C) -> (T, N, C)
    #     # encoder_out, encoder_out_lens = self.encoder(x, x_lens, src_key_padding_mask)
    #     # encoder_out = encoder_out.permute(1, 0, 2)  # (T, N, C) ->(N, T, C)
    #     return torch.zeros(1, 1, 1), torch.zeros(1, 1, 1), torch.zeros(1, 1, 1)

    @torch.jit.export
    def get_init_states(
        self,
        batch_size: int = 1,
        device: torch.device = torch.device("cpu"),
    ) -> List[torch.Tensor]:
        """
        Returns a list of cached tensors of all encoder layers. For layer-i, states[i*6:(i+1)*6]
        is (cached_key, cached_nonlin_attn, cached_val1, cached_val2, cached_conv1, cached_conv2).
        states[-2] is the cached left padding for ConvNeXt module,
        of shape (batch_size, num_channels, left_pad, num_freqs)
        states[-1] is processed_lens of shape (batch,), which records the number
        of processed frames (at 50hz frame rate, after encoder_embed) for each sample in batch.
        """
        states = self.encoder.get_init_states(batch_size, device)

        embed_states = self.encoder_embed.get_init_states(batch_size, device)
        states.append(embed_states)

        processed_lens = torch.zeros(batch_size, dtype=torch.int32, device=device)
        states.append(processed_lens)
        return states
        # Get the encoder's initial states
        # batch_size = 1
        # device = torch.device("cpu")
        # encoder_states = self.encoder.get_init_states(batch_size, device)
        # # Get encoder_embed's initial states
        # embed_states = self.encoder_embed.get_init_states(batch_size, device)
        # # Create the processed_lens tensor
        # processed_lens = torch.zeros(batch_size, dtype=torch.int32, device=device)
        #
        # # Merge the three into one list; built directly instead of mutating with append
        # all_states = list(encoder_states) + list(embed_states) + [processed_lens]
        # # assert len(all_states) == 116
        # # Convert the list to a tuple and return
        # # res = tuple(all_states)
        # return all_states

def greedy_decode(ctc_output, blank_index):
    """
    Perform greedy (best-path) decoding on CTC output.

    Standard CTC collapse is: merge consecutive repeated indices, THEN drop
    blanks. That requires tracking the previous frame's index on every time
    step, including blanks — otherwise a repeated symbol separated by a
    blank (e.g. "x <blank> x") is wrongly collapsed into a single emission.

    Args:
        ctc_output (torch.Tensor): CTC output tensor of shape
            (batch_size, T, vocab_size); only the per-frame argmax is used,
            so logits or log-probs both work.
        blank_index (int): Index of the blank symbol in the vocabulary.

    Returns:
        List[List[int]]: one decoded token-id sequence per batch item.
    """
    batch_size, T, vocab_size = ctc_output.shape
    decoded_sequences = []

    for batch in range(batch_size):
        # Most probable index at each time step.
        max_indices = ctc_output[batch].argmax(dim=1).tolist()

        decoded_sequence = []
        prev_char = None

        for char_index in max_indices:
            # Emit only non-blank symbols that differ from the previous frame.
            if char_index != blank_index and char_index != prev_char:
                decoded_sequence.append(char_index)
            # Bug fix: update prev_char unconditionally so that a blank
            # frame resets the repeat state ("x blank x" -> two x's).
            prev_char = char_index

        decoded_sequences.append(decoded_sequence)

    return decoded_sequences

def get_zipformer_encoder(ckpt_path):
    """Load a zipformer checkpoint and wrap its encoder for streaming use.

    Args:
        ckpt_path: path to a checkpoint whose 'model' entry holds the
            full model state dict.
    Returns:
        A StreamingEncoderModel wrapping the model's encoder and
        encoder_embed modules.
    """
    model = get_model(CONFIG_ZIPFORMER_PARAM)
    checkpoint = torch.load(ckpt_path, map_location='cpu')
    model.load_state_dict(checkpoint['model'], strict=True)
    return StreamingEncoderModel(
        encoder=model.encoder,
        encoder_embed=model.encoder_embed,
    )

def get_encoder_and_ctc(ckpt_path, device, if_decode=True):
    """Load a zipformer checkpoint and return (streaming encoder, CTC head).

    Args:
        ckpt_path: path to a checkpoint whose 'model' entry holds the state dict.
        device: torch device both modules are moved to.
        if_decode: when True put both modules in eval mode, otherwise train mode.
    Returns:
        Tuple of (StreamingEncoderModel, CTC output linear layer), both on
        `device`.
    """
    model = get_model(CONFIG_ZIPFORMER_PARAM)
    # Fix: load onto CPU first so a GPU-saved checkpoint also works on
    # CPU-only hosts (consistent with get_zipformer_encoder); modules are
    # moved to the target device below.
    conifigs_ckpt = torch.load(ckpt_path, map_location='cpu')
    model_ckpt = conifigs_ckpt['model']
    model.load_state_dict(model_ckpt, strict=True)
    encoder = StreamingEncoderModel(
        encoder=model.encoder,
        encoder_embed=model.encoder_embed
    )
    ctc_linear = model.ctc_output
    encoder = encoder.to(device)
    ctc_linear = ctc_linear.to(device)
    if if_decode:
        encoder.eval()
        ctc_linear.eval()
    else:
        encoder.train()
        ctc_linear.train()
    return encoder, ctc_linear



# from wenet.zipformer.config import CONFIG_ZIPFORMER_PARAM
def load_model_k2(ckpt_path='/home/work_nfs15/asr_data/ckpt/asr_online_system/epoch-10.pt'):
    """Offline (non-streaming) CTC greedy-decoding evaluation.

    Loads a zipformer checkpoint, prints model-size statistics, decodes the
    wav files listed in ./wav_100.scp utterance-by-utterance with the
    full-utterance forward pass, writes hypotheses to ./res_output/text_hyp
    and finally computes WER against the reference text.

    NOTE(review): input/output paths and the token table path are
    hard-coded below; confirm they exist before running.
    """

    from config import CONFIG_ZIPFORMER_PARAM
    model = get_model(CONFIG_ZIPFORMER_PARAM)
    # load_checkpoint(Path(ckpt_path), model)
    conifigs_ckpt = torch.load(ckpt_path)
    model_ckpt = conifigs_ckpt['model']
    model.load_state_dict(model_ckpt, strict=True)
    model.eval()
    # Print per-submodule parameter counts for sanity checking.
    utils_file.logging_print('model:')
    utils_file.print_model_size(model)
    utils_file.logging_print('encoder:')
    utils_file.print_model_size(model.encoder)
    utils_file.logging_print('attention_decoder:')
    utils_file.print_model_size(model.attention_decoder)
    utils_file.logging_print('encoder_embed:')
    utils_file.print_model_size(model.encoder_embed)
    utils_file.logging_print('ctc_output:')
    utils_file.print_model_size(model.ctc_output)

    utils_file.logging_print('new_encoder:')
    new_encoder = StreamingEncoderModel(
        encoder=model.encoder,
        encoder_embed=model.encoder_embed
    )
    utils_file.logging_print('ctc_linear:')
    ctc_linear = model.ctc_output
    input_wav_scp = "./wav_100.scp"
    true_text_path = "/home/work_nfs15/asr_data/data/asr_test_sets/aishell/text"
    output_res_path = "./res_output/text_hyp"
    output_wer_dir_path = "./res_output"
    utils_file.makedir_sil(os.path.dirname(output_res_path))
    token_path='/home/work_nfs15/asr_data/ckpt/asr_online_system/lang_char_bpe/tokens.txt'

    wav_path_dict = utils_file.load_dict_from_scp(input_wav_scp)
    res_text_dict = {}
    expected_sample_rate = 16000
    utils_file.logging_print("Decoding started")
    num_decoded_num = 0
    output_f = open(output_res_path, 'w', encoding='utf-8')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    new_encoder = new_encoder.to(device)
    new_encoder.eval()
    print("new_encoder.training:",new_encoder.training)
    print("encoder.training:",new_encoder.encoder.training)
    print("encoder_embed.training:",new_encoder.encoder_embed.training)
    ctc_linear = ctc_linear.to(device)
    num_decode = 0
    # tokens.txt maps token -> id; invert it to id -> token for decoding.
    token_table = {int(v): k for k, v in utils_file.load_dict_from_scp(token_path).items()}
    features = []
    for key, wav_path in wav_path_dict.items():
        utils_file.logging_print('这是第', num_decoded_num, '个音频')
        wave, sample_rate = torchaudio.load(wav_path)
        assert (
                sample_rate == expected_sample_rate
        ), f"Expected sample rate: {expected_sample_rate}. Given: {sample_rate}"
        # We use only the first channel
        res_wav = wave[0].contiguous()
        res_wav = res_wav.to(device)
       #print(res_wav[:100])

        # Kaldi-compatible 80-dim fbank; negative high_freq is interpreted
        # relative to the Nyquist frequency — TODO confirm against
        # torchaudio.compliance.kaldi docs.
        fbank_one = torchaudio.compliance.kaldi.fbank(
            res_wav.unsqueeze(0),
            snip_edges=False,
            high_freq=-400,
            num_mel_bins=80,
            dither=0,
        )
        fbank_one = fbank_one.to(device)
        features.append(fbank_one)
        # Effective batch size is 1: decode as soon as one utterance is buffered.
        if len(features) ==1:
            feature_lengths = [f.size(0) for f in features]
            features = pad_sequence(features, batch_first=True, padding_value=0)
            feature_lengths = torch.tensor(feature_lengths, device=device)
            # state = new_encoder.get_init_states(batch_size=1, device=device)
            # encoder_out, _,_ = new_encoder.forward_(features, feature_lengths,state)
            encoder_out, _= new_encoder.forward(features, feature_lengths)
           #print('encoder_out shape',encoder_out.shape)
           #print('encoder_out 第一帧的样子', encoder_out[0][0])
            # ctc_output = ctc_linear(encoder_out)  # (N, T, C)
            ctc_output = ctc_linear(encoder_out)  # (N, T, C)
           #print("ctc_output shape:\n ",ctc_output.shape)
           #print('ctc output 第一帧的样子', ctc_output[0][0])
            token_ids = greedy_decode(ctc_output, blank_index=0)
           #print(token_ids)
            # token_table = k2.SymbolTable.from_file(token_path)
            hyps = [[token_table[i] for i in ids] for ids in token_ids]
            for hyp in hyps:
                res_item = []
                for char in hyp:
                    # BPE word-boundary marker "▁" becomes a space.
                    char = char.replace("▁", " ").strip()
                    res_item.append(char)
                res_item_str = "".join(res_item)
                print(f'{num_decoded_num}/{len(wav_path_dict)} {key} {res_item_str}')
                output_f.write(f'{key} {res_item_str}\n')
            num_decoded_num += len(features)
            num_decode += len(features)
            features = []
            print('一个batch解码完毕-------------------------------------')
        else:
            continue
    output_f.flush()
    output_f.close()
    # Compute WER against the reference transcript and show the summary tail.
    utils_file.do_compute_wer(true_text_path, output_res_path, output_wer_dir_path)
    res = utils_file.do_execute_shell_command(f"tail -n 10 {output_wer_dir_path}/wer")
    print(res)

def streaming_decode(ckpt_path='/home/work_nfs15/asr_data/ckpt/asr_online_system/epoch-10.pt'):
    """Chunk-wise streaming CTC greedy-decoding evaluation.

    For each wav in the scp list, feeds fbank features chunk by chunk
    through the streaming encoder (carrying cached states across chunks),
    prints the running partial hypothesis, then re-decodes the concatenated
    encoder frames for the final result and computes WER at the end.

    NOTE(review): input/output paths and the token table path are
    hard-coded below; confirm they exist before running.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    encoder, ctc_linear = get_encoder_and_ctc(ckpt_path, device)
    input_wav_scp = "/home/node54_tmpdata/xlgeng/code/wenet_mine/icefall_eggs/wav_100.scp"
    true_text_path = "/home/work_nfs15/asr_data/data/asr_test_sets/aishell/text"
    output_res_path = "./res_output/text_hyp"
    output_wer_dir_path = "./res_output"
    utils_file.makedir_sil(os.path.dirname(output_res_path))
    token_path = '/home/work_nfs15/asr_data/ckpt/asr_online_system/lang_char_bpe/tokens.txt'
    wav_path_dict = utils_file.load_dict_from_scp(input_wav_scp)
    expected_sample_rate = 16000
    utils_file.logging_print("Decoding started")
    num_decoded_num = 0
    output_f = open(output_res_path, 'w', encoding='utf-8')
    num_decode = 0
    # Chunk size in encoder frames (after subsampling).
    chunk_size=16
    # tokens.txt maps token -> id; invert it to id -> token for decoding.
    token_table = {int(v): k for k, v in utils_file.load_dict_from_scp(token_path).items()}
    for key, wav_path in wav_path_dict.items():
        num_decoded_num +=1
        utils_file.logging_print('这是第', num_decoded_num, f'个音频, 开始流式识别，chunk_size={chunk_size}')
        wave, sample_rate = torchaudio.load(wav_path)
        assert (
                sample_rate == expected_sample_rate
        ), f"Expected sample rate: {expected_sample_rate}. Given: {sample_rate}"
        # Use only the first channel.
        res_wav = wave[0].contiguous()
        res_wav = res_wav.to(device)

        # Kaldi-compatible 80-dim fbank; negative high_freq is interpreted
        # relative to the Nyquist frequency — TODO confirm against
        # torchaudio.compliance.kaldi docs.
        fbank_one = torchaudio.compliance.kaldi.fbank(
            res_wav.unsqueeze(0),
            snip_edges=False,
            high_freq=-400,
            num_mel_bins=80,
            dither=0,
        )
        # Number of fbank frames
        frame_fbank_num = fbank_one.size(0)
        print("fbank shape:\n ",fbank_one.shape, '帧数：', frame_fbank_num)
        # Fbank frames per encoder chunk; assumes 4 fbank frames per encoder
        # frame — TODO confirm the overall subsampling factor.
        chunk_size_fbank = chunk_size*4
        one_state = encoder.get_init_states(batch_size=1,device=device)
        res_text = ""
        res_encoder_frames = []

        for i in range(frame_fbank_num//chunk_size_fbank+1):
            # NOTE(review): both branches are identical; the +13 extra
            # frames presumably cover the frontend's pad_length (7 + 2*3)
            # lookahead — confirm and simplify.
            if i ==0:
                fbank_one_chunk = fbank_one[i * chunk_size_fbank:(i + 1) * chunk_size_fbank+13]
            else:
                fbank_one_chunk = fbank_one[i * chunk_size_fbank:(i + 1) * chunk_size_fbank+13]

            # fbank_one_chunk = fbank_one[i*chunk_size_fbank:(i+1)*chunk_size_fbank]
            # Zero-pad the (possibly short) final chunk up to a full chunk.
            if i>0 and fbank_one_chunk.size(0) < chunk_size_fbank:
                fbank_one_chunk =  torch.nn.functional.pad(fbank_one_chunk, (0, 0, 0, chunk_size_fbank-fbank_one_chunk.size(0) ))
            features = [fbank_one_chunk]
            feature_lengths = [f.size(0) for f in features]
            features = pad_sequence(features, batch_first=True, padding_value=0)
            feature_lengths = torch.tensor(feature_lengths, device=device)
            # Streaming forward: one_state carries caches across chunks.
            encoder_out, _, one_state = encoder.forward_(features, feature_lengths, one_state)
            res_encoder_frames.append(encoder_out)
            ctc_output = ctc_linear(encoder_out)
            token_ids = greedy_decode(ctc_output, blank_index=0)
            hyps = [[token_table[i] for i in ids] for ids in token_ids]
            hyp = hyps[0]
            res_item = []
            for char in hyp:
                # BPE word-boundary marker "▁" becomes a space.
                char = char.replace("▁", " ").strip()
                res_item.append(char)
            res_item_str = "".join(res_item)
            res_text += res_item_str
            print(f'第{i}个chunk,中间结果是：{res_text}')
        # Final pass: decode over all accumulated encoder frames at once.
        all_encoder_frames = torch.cat(res_encoder_frames, dim=1)
        ctc_output = ctc_linear(all_encoder_frames)
        token_ids = greedy_decode(ctc_output, blank_index=0)
        hyps = [[token_table[i] for i in ids] for ids in token_ids]
        hyp = hyps[0]
        res_item = []
        for char in hyp:
            char = char.replace("▁", " ").strip()
            res_item.append(char)
        res_item_str = "".join(res_item)
        print(f'final result:{res_item_str}')
        output_f.write(f'{key} {res_item_str}\n')
    output_f.flush()
    output_f.close()
    # Compute WER against the reference transcript and show the summary tail.
    utils_file.do_compute_wer(true_text_path, output_res_path, output_wer_dir_path)
    res = utils_file.do_execute_shell_command(f"tail -n 10 {output_wer_dir_path}/wer")
    print(res)










def check_ckpt(ckpt_path='/home/work_nfs15/asr_data/ckpt/asr_online_system/epoch-10.pt'):
    """Print the parameter names stored under 'model' in a checkpoint."""
    state_dict = torch.load(ckpt_path)['model']
    utils_file.print_list(state_dict.keys())

# export CUDA_VISIBLE_DEVICES="5"
if __name__ == '__main__':
    # Default entry point: run the chunk-wise streaming decoding evaluation.
    streaming_decode()