# Relative path to the project root (this file appears to live two levels deep).
root_dir = "../../"
# Training configuration YAML used by the __main__ smoke test below.
config_path = f"{root_dir}/conf/train_2025_9_10_xlgeng.yaml"
# Add root_dir to the Python path so the project-local `wenet` / `gxl_ai_utils`
# packages below can be imported when this file is run as a script.
import sys
sys.path.insert(0, root_dir)
from typing import Dict, List

from gxl_ai_utils.utils import utils_file

# from wenet.models.moe_comformer.asr_encoder import init_moe_comformer
from wenet.models.moe_comformer.ctc import CTC


import torch
import torch.nn as nn


def make_seq_mask(feat_lengths, max_len=None):
    """Build a boolean padding mask from per-sequence lengths.

    Args:
        feat_lengths: (batch,) tensor holding the number of valid frames
            in each sequence.
        max_len: optional mask width; defaults to ``feat_lengths.max()``.

    Returns:
        (batch, max_len) bool tensor where True marks a valid frame and
        False marks padding.
    """
    if max_len is None:
        max_len = feat_lengths.max().item()

    # Frame indices 0..max_len-1 on the same device as the lengths.
    positions = torch.arange(max_len, device=feat_lengths.device)

    # Broadcast (1, max_len) against (batch, 1): a frame is valid iff its
    # position is strictly below the sequence's length.
    return positions.unsqueeze(0) < feat_lengths.unsqueeze(1)

from wenet.models.moe_comformer.search import (DecodeResult,
                                        attention_beam_search,
                                        attention_rescoring,
                                        ctc_greedy_search,
                                        ctc_prefix_beam_search)

class BestRQ_CTC_FT_withFSQ(nn.Module):
    """CTC fine-tuning head on top of a (BestRQ-pretrained) conformer
    encoder with FSQ.

    The wrapped ``conformer`` maps fbank features to frame-level encoder
    states; a linear CTC projection (``self.ctc``) produces token
    posteriors for training (``forward``) and decoding (``decode``).
    """

    def __init__(
        self,
        conformer: torch.nn.Module,
        vocab_size: int = 6710,
        embed_dim: int = 384,
    ):
        """
        Args:
            conformer: encoder module; called as ``conformer(x, x_lengths)``
                and expected to return ``(features, mask)`` where mask is
                (batch, 1, frames) — TODO confirm against the encoder impl.
            vocab_size: output vocabulary size of the CTC layer.
            embed_dim: encoder output dimension fed to the CTC projection
                (previously hard-coded to 384; the default preserves the
                old behavior).
        """
        super().__init__()

        self.conformer = conformer
        self.embed_dim = embed_dim

        # Padding id for targets. NOTE(review): CTC below relies on
        # text_lengths rather than this value; kept for compatibility.
        self.pad_id = -100

        self.ctc = CTC(vocab_size, self.embed_dim)

    def forward(
        self,
        batch_dict: Dict,
        device: torch.device,
    ):
        """Compute the CTC loss for one batch.

        Args:
            batch_dict: expects keys 'feats' (batch, frames, feat_dim),
                'feats_lengths' (batch,), 'target' (batch, max_text_len)
                and 'target_lengths' (batch,).
            device: device to move all tensors to.

        Returns:
            dict with 'loss' and 'ctc_loss' (identical tensors here).
        """
        x = batch_dict['feats'].to(device).to(torch.float32)
        x_lengths = batch_dict['feats_lengths'].to(device).to(torch.int32)
        text = batch_dict['target'].to(device).to(torch.int32)
        text_lengths = batch_dict['target_lengths'].to(device).to(torch.int32)

        features, mask = self.conformer(
            x,
            x_lengths,
        )
        # mask is (batch, 1, frames); count True entries per row to recover
        # the (possibly subsampled) valid frame counts.
        features_lengths = mask.squeeze(1).long().sum(1)
        loss_ctc, ctc_probs = self.ctc(features, features_lengths, text,
                                       text_lengths)
        return {
            "loss": loss_ctc,
            "ctc_loss": loss_ctc,
        }

    def decode(
            self,
            methods: List[str],
            speech: torch.Tensor,
            speech_lengths: torch.Tensor,
            beam_size: int = 1,
            decoding_chunk_size: int = -1,
            num_decoding_left_chunks: int = -1,
            ctc_weight: float = 0.0,
            simulate_streaming: bool = False,
            reverse_weight: float = 0.0,
            context_graph = None,
            blank_id: int = 0,
            blank_penalty: float = 0.0,
            length_penalty: float = 0.0,
            infos: Dict[str, List[str]] = None,
    ) -> Dict[str, List[DecodeResult]]:
        """ Decode input speech

        Args:
            methods:(List[str]): list of decoding methods to use, which could
                could contain the following decoding methods, please refer paper:
                https://arxiv.org/pdf/2102.01547.pdf
                   * ctc_greedy_search
                   * ctc_prefix_beam_search
                   * attention
                   * attention_rescoring
            speech (torch.Tensor): (batch, max_len, feat_dim)
            speech_length (torch.Tensor): (batch, )
            beam_size (int): beam size for beam search
            decoding_chunk_size (int): decoding chunk for dynamic chunk
                trained model.
                <0: for decoding, use full chunk.
                >0: for decoding, use fixed chunk size as set.
                0: used for training, it's prohibited here
            simulate_streaming (bool): whether do encoder forward in a
                streaming fashion
            reverse_weight (float): right to left decoder weight
            ctc_weight (float): ctc score weight

        Returns: dict results of all decoding methods
        """
        utils_file.logging_info(f"decoding with methods: {methods}")
        assert speech.shape[0] == speech_lengths.shape[0], "Mismatched batch size"
        assert decoding_chunk_size != 0, "decoding_chunk_size == 0 is only for training"
        encoder_out, encoder_mask = self.conformer(
            speech,
            speech_lengths,
        )
        encoder_lens = encoder_mask.squeeze(1).long().sum(1)
        if blank_penalty > 0.0:
            # Penalize the blank logit before the softmax to discourage
            # over-emitting blanks.
            logits = self.ctc.ctc_lo(encoder_out)
            logits[:, :, blank_id] -= blank_penalty
            ctc_probs = logits.log_softmax(dim=2)
        else:
            ctc_probs = self.ctc.log_softmax(encoder_out)
        results = {}
        if 'attention' in methods:
            results['attention'] = attention_beam_search(
                self, encoder_out, encoder_mask, beam_size, length_penalty,
                infos)
        if 'ctc_greedy_search' in methods:
            try:
                results['ctc_greedy_search'] = ctc_greedy_search(
                    ctc_probs, encoder_lens, blank_id)
            except Exception as e:
                # Best-effort: log and continue with the remaining methods.
                utils_file.logging_error(f"ctc_greedy_search error: {e}")

        if 'ctc_prefix_beam_search' in methods:
            # Fix: forward the caller-supplied context_graph (was hard-coded
            # to None, silently ignoring it), consistent with the
            # attention_rescoring fallback below.
            ctc_prefix_result = ctc_prefix_beam_search(ctc_probs, encoder_lens,
                                                       beam_size,
                                                       context_graph, blank_id)
            results['ctc_prefix_beam_search'] = ctc_prefix_result
        if 'attention_rescoring' in methods:
            # attention_rescoring re-ranks the ctc_prefix_beam_search nbest,
            # so reuse that result when it was already computed.
            if 'ctc_prefix_beam_search' in results:
                ctc_prefix_result = results['ctc_prefix_beam_search']
            else:
                ctc_prefix_result = ctc_prefix_beam_search(
                    ctc_probs, encoder_lens, beam_size, context_graph,
                    blank_id)
            # Fix: this class never defines apply_non_blank_embedding, so the
            # previous unconditional attribute access raised AttributeError.
            # Treat a missing attribute as "feature disabled".
            if getattr(self, 'apply_non_blank_embedding', False):
                encoder_out, _ = self.filter_blank_embedding(
                    ctc_probs, encoder_out)
            results['attention_rescoring'] = attention_rescoring(
                self, ctc_prefix_result, encoder_out, encoder_lens, ctc_weight,
                reverse_weight, infos)
        return results




if __name__ == '__main__':
    """Smoke test: build the model from the YAML config and run one
    forward pass on a fake batch."""
    from wenet.models.moe_comformer.asr_encoder import init_moe_comformer_with_FSQ
    configs = utils_file.load_dict_from_yaml(config_path)
    # Init asr model from configs
    encoder = init_moe_comformer_with_FSQ(configs)
    print(encoder)
    ctc_model = BestRQ_CTC_FT_withFSQ(encoder)
    print(ctc_model)
    # Fake batch: 2 utterances of 80-dim fbank frames with different
    # valid lengths, plus random token targets.
    fake_fbank = torch.randn(2, 232, 80)
    fake_fbank_lengths = torch.tensor([232, 224])
    fake_text = torch.randint(0, 6710, (2, 10))
    fake_text_lengths = torch.tensor([10, 7])
    fake_batch_dict = {'feats': fake_fbank,
                       'feats_lengths': fake_fbank_lengths,
                       'target': fake_text,
                       'target_lengths': fake_text_lengths
                       }
    # Fix: fall back to CPU so the smoke test also runs on machines
    # without a GPU (torch.device('cuda') was hard-coded before).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    ctc_model.to(device)
    res_dict = ctc_model(fake_batch_dict, device)
    print(res_dict)
