from collections import defaultdict
from typing import List, Optional, Tuple, Union, Any

import math
import logging
import torch

from wenet.dataset.processor import feats_length_fn
from wenet.models.moe_comformer.best_rq_ctc_ft_with_fsq import BestRQ_CTC_FT_withFSQ
from wenet.models.moe_comformer.pure_encoder_v2r import ChunkMasker, MixtureOfEncodings
from wenet.models.moe_comformer.common import remove_duplicates_and_blank
from wenet.models.moe_comformer.mask import make_pad_mask
from wenet.models.moe_comformer.moe_train_utils import get_global_loss_dict, reset_global_loss_dict

from wenet.models.moe_comformer.download_simple import SpeechProjectionModule

class MoeComformer_ssl_encoder_with_FSQ(torch.nn.Module):
    """SSL Conformer encoder with an FSQ bottleneck.

    Pipeline: embedder -> shared encoder stack -> FSQ quantizer -> branch
    encoder stacks (only the first branch's output is returned).

    NOTE(review): several methods reference attributes that are never set in
    ``__init__`` (``encoder_moe``, ``ctc``, ``projector``, ``vocab_size``,
    ``eos``) — presumably attached by a subclass or external wiring; calling
    those methods on a bare instance raises AttributeError. Confirm against
    the surrounding project before relying on them.
    """
    def __init__(
        self,
        embedder: torch.nn.Module,
        encoders: List,
        chunk_masker_conf: dict,
        **kwargs,
    ):
        """
        Args:
            embedder: frontend module; must expose ``pos_enc.d_model``.
            encoders: list of encoder stacks. ``encoders[0]`` becomes the
                shared stack; the remaining stacks become per-branch stacks.
            chunk_masker_conf: kwargs forwarded to ``ChunkMasker``.
            **kwargs: accepted and ignored.
        """
        super().__init__()

        self.embed = embedder
        self.chunk_masker = ChunkMasker(**chunk_masker_conf)
        self.shared_encoder = torch.nn.ModuleList(encoders[0])
        self.encoders = torch.nn.ModuleList(
            [torch.nn.ModuleList(encs) for encs in encoders[1:]])
        # Model width is taken from the embedder's positional encoding.
        self.dim = embedder.pos_enc.d_model
        from vector_quantize_pytorch import FSQ
        self.fsq = FSQ(
            levels=[3, 3, 3, 3, 3, 3, 3, 3],  # 8 codebook dims, 3 levels each
            dim=int(self.dim),  # FSQ internally projects from self.dim down to 8 dims first, then quantizes
            preserve_symmetry=True,  # if ``levels`` changes (e.g. a different number of dims), re-check how this flag affects the quantization result (before the post-projection)
        )
        # self.simple_down_2 = SpeechProjectionModule(self.dim, self.dim, 2)

    def output_size(self) -> int:
        """Return the encoder output feature dimension."""
        return self.dim

    def num_encoders(self) -> int:
        """Return the number of branch encoder stacks (shared stack excluded)."""
        return len(self.encoders)

    def set_epoch(self, epoch):
        """Propagate the current epoch number to every sub-encoder."""
        for enc in self.shared_encoder:
            enc.set_epoch(epoch)
        for encoder in self.encoders:
            for enc in encoder:
                enc.set_epoch(epoch)
        # NOTE(review): self.encoder_moe is never assigned in this class;
        # this line raises AttributeError unless it is attached externally.
        self.encoder_moe.set_epoch(epoch)

    def gather_stat_info(self, infos: list):
        """Collect statistics from all sub-encoders into ``infos`` (in place)."""
        for enc in self.shared_encoder:
            enc.gather_stat_info(infos)
        for encoder in self.encoders:
            for enc in encoder:
                enc.gather_stat_info(infos)

    def get_stat(self, infos: list):
        # NOTE(review): relies on externally-attached self.encoder_moe.
        self.encoder_moe.get_stat(infos)

    def get_debug_stat(self, infos: list, name=''):
        """Collect per-layer debug statistics, tagged with the encoder index.

        NOTE(review): the ``name`` parameter is accepted but unused here.
        """
        for i, enc in enumerate(self.shared_encoder):
            enc.get_debug_stat(infos, f'shared_encoder{i}')
        for i, encoder in enumerate(self.encoders):
            for j, enc in enumerate(encoder):
                enc.get_debug_stat(infos, f'encoder{i}.{j}')

    def forward(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        input_mask: Optional[torch.Tensor]=None,
        **kwargs
    ) -> Tuple[Optional[torch.Tensor],
               Optional[torch.Tensor]]:
        """Encode speech: frontend + shared stack + FSQ + branch-0 stack.

        Args:
            speech: (Batch, Length, FeatDim) input features.
            speech_lengths: (Batch,) valid lengths of ``speech``.
            input_mask: optional mask that overrides the one produced by the
                embedder; assumed to already be at the subsampled resolution
                — TODO(review) confirm against callers.
        Outputs:
            encodings: (Batch, Length', Dim) branch-0 encoder output.
            mask: (Batch, 1, Length') padding mask after subsampling.
        """
        # 1. Frontend + encoders; no loss is computed in this module.
        encodings, embeddings, masks = self._forward_encoder_input_mask(
            speech, speech_lengths, input_mask)
        # lengths = masks.squeeze(1).sum(1)  # (B,)
        return encodings, masks


    def _forward_encoder_input_mask(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        masks: Optional[torch.Tensor]=None,
        decoding_chunk_size: Union[int, list, torch.Tensor] = -1,
        num_decoding_left_chunks: int = -1,
        use_chunk_mask: Optional[bool] = None,
    ) -> tuple[Any, Any, Any]:
        """Run frontend, shared encoders, FSQ bottleneck and branch encoders.

        Unlike :meth:`_forward_encoder`, a caller-supplied ``masks`` may
        replace the mask derived by the embedder; it is assumed to be at the
        subsampled resolution — TODO(review) confirm.

        Returns:
            (encodings, embeddings, masks): branch-0 output computed on the
            quantized features, positional embeddings, and the padding mask.
        """
        # A sequence of chunk sizes may be passed in; only the first is used.
        if hasattr(decoding_chunk_size, '__len__') and len(decoding_chunk_size) > 1:
            decoding_chunk_size = decoding_chunk_size[0]
        masks_for_embed = ~make_pad_mask(speech_lengths, speech.size(1)).unsqueeze(1)  # (B, 1, T)
        xs, embeddings, masks_back_from_embed = self.embed(speech, masks_for_embed)
        if masks is not None:
            masks = masks.unsqueeze(1)
        else:
            masks = masks_back_from_embed
        chunk_masks = self.chunk_masker(masks, decoding_chunk_size,
                                        num_decoding_left_chunks)
        for enc in self.shared_encoder:
            xs = enc(xs, embeddings, masks, chunk_masks, use_chunk_mask)
        encodings = []
        # xs, masks = self.simple_down_2(xs, masks.squeeze(1))
        # masks = masks.unsqueeze(1)
        # print("xs.shape after the shared encoder:", xs.shape)
        # FSQ bottleneck: the branch encoders consume the quantized features.
        quantize, indices = self.fsq(xs)
        # print("quantize shape:", quantize.shape)
        for i, encoder in enumerate(self.encoders):
            xs_d = quantize
            for enc in encoder:
                xs_d = enc(xs_d, embeddings, masks, chunk_masks, use_chunk_mask)
            encodings.append(xs_d)
        # Only the first branch's output is returned.
        encodings = encodings[0]
        return encodings, embeddings, masks


    def tokenizer(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
    ) -> tuple[Any, Any]:
        """Map speech features to discrete FSQ codebook indices.

        Runs the frontend and shared stack with chunk size 1 and returns the
        FSQ indices together with the valid token lengths.

        Returns:
            indices: per-frame integer FSQ codes.
            seq_lengths: (Batch,) valid lengths derived from the mask.
        """
        masks_for_embed = ~make_pad_mask(speech_lengths, speech.size(1)).unsqueeze(1)  # (B, 1, T)
        xs, embeddings, masks_back_from_embed = self.embed(speech, masks_for_embed)
        masks = masks_back_from_embed
        chunk_masks = self.chunk_masker(masks, 1,-1)
        for enc in self.shared_encoder:
            xs = enc(xs, embeddings, masks, chunk_masks, None)
        quantize, indices = self.fsq(xs)
        seq_lengths = masks.squeeze(1).sum(1)
        return indices, seq_lengths

    def _forward_encoder(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        decoding_chunk_size: Union[int, list, torch.Tensor] = -1,
        num_decoding_left_chunks: int = -1,
        use_chunk_mask: Optional[bool] = None,
    ) -> tuple[Any, Any, Any]:
        """Run frontend, shared encoders and branch encoders — WITHOUT FSQ.

        Unlike :meth:`_forward_encoder_input_mask`, the branch stacks consume
        the raw shared-encoder output here, not the quantized features.

        Returns:
            (encodings, embeddings, masks) with ``encodings`` from branch 0.
        """
        # A sequence of chunk sizes may be passed in; only the first is used.
        if hasattr(decoding_chunk_size, '__len__') and len(decoding_chunk_size) > 1:
            decoding_chunk_size = decoding_chunk_size[0]
        masks = ~make_pad_mask(speech_lengths, speech.size(1)).unsqueeze(1)  # (B, 1, T)
        xs, embeddings, masks = self.embed(speech, masks)
        chunk_masks = self.chunk_masker(masks, decoding_chunk_size,
                                        num_decoding_left_chunks)
        for enc in self.shared_encoder:
            xs = enc(xs, embeddings, masks, chunk_masks, use_chunk_mask)
        encodings = []
        for i, encoder in enumerate(self.encoders):
            xs_d = xs
            for enc in encoder:
                xs_d = enc(xs_d, embeddings, masks, chunk_masks, use_chunk_mask)
            encodings.append(xs_d)
        # Only the first branch's output is returned.
        encodings = encodings[0]
        return encodings, embeddings, masks

    def _forward_encoder_export_offline(self, xs, lengths, chunk_size=None):
        """Offline export entry: encode and apply the last CTC head.

        NOTE(review): relies on externally-attached ``self.ctc``.
        """
        if chunk_size is None:
            chunk_size = torch.tensor([-1], device=xs.device)
        elif type(chunk_size) == torch.Tensor:
            chunk_size = chunk_size.type(torch.int64)
        xs_list, emb, masks = self._forward_encoder(xs, lengths, chunk_size,
                                                    use_chunk_mask=True)
        logits = self.ctc[-1]._forward(xs_list[-1])
        out_lens = masks.sum((1, 2)).type(torch.int32)
        return logits, xs_list[-1], out_lens

    def _forward_encoder_export_offline_proj(self, xs, lengths,
                                             chunk_size=None):
        """Offline export entry with an extra projection head.

        NOTE(review): relies on externally-attached ``self.ctc`` and
        ``self.projector``.
        """
        assert self.projector is not None
        if chunk_size is None:
            chunk_size = torch.tensor([-1], device=xs.device)
        elif type(chunk_size) == torch.Tensor:
            chunk_size = chunk_size.type(torch.int64)
        xs_list, emb, masks = self._forward_encoder(xs, lengths, chunk_size,
                                                    use_chunk_mask=True)
        logits = self.ctc[-1]._forward(xs_list[-1])
        out_lens = masks.sum((1, 2)).type(torch.int32)
        proj, proj_lengths = self.projector(xs_list[-1], out_lens)
        return logits, xs_list[-1], out_lens, proj, proj_lengths

    def _forward_encoder_export_incremental(
        self, xs, lengths, att_caches, cache_lens, cnn_caches):
        """Streaming export entry: one incremental step over the causal layers.

        Consumes and re-emits flattened attention/CNN caches for the shared
        stack plus branch 0. NOTE(review): relies on externally-attached
        ``self.ctc``; cache layouts are assumed to match
        ``num_causal_layers()`` — confirm against the export pipeline.
        """
        masks = ~make_pad_mask(
            lengths.type(torch.int64), xs.size(1)).unsqueeze(1)
        cache_lens = cache_lens.type(torch.int64)
        xs, _, masks = self.embed(xs, masks, cache_lens)
        # Positional embedding covers cached frames plus the new chunk.
        emb = self.embed.position_encoding(
             cache_lens - att_caches.size(1), att_caches.size(1) + xs.size(1))
        n = self.num_causal_layers()
        d = self.dim
        att_caches = att_caches.reshape(xs.size(0), -1, n, d)
        cnn_caches = cnn_caches.reshape(xs.size(0), n, -1)
        i = 0
        r_att_list = []
        r_cnn_list = []
        ys = []
        for enc_list in [self.shared_encoder, self.encoders[0]]:
            for enc in enc_list:
                l = enc.num_layers()
                # Slice out this encoder's share of the layer-stacked caches.
                xs, att, cnn = enc.forward_incremental(
                    xs, emb, masks, att_caches[:, :, i: i + l],
                    cache_lens, cnn_caches[:, i: i + l])
                r_att_list += att
                r_cnn_list += cnn
                i += l
            ys.append(xs)
        ylens = masks.sum((1, 2)).type(torch.int32)
        return (self.ctc[0]._forward(ys[-1]), ys[0], ylens, ys[-1],
                torch.cat(r_att_list, 2), torch.cat(r_cnn_list, 1))

    def _forward_encoder_export_stable(self, xs, lengths, ys):
        """Export entry for the non-causal tail: last branch + MoE + CTC.

        NOTE(review): relies on externally-attached ``self.encoder_moe`` and
        ``self.ctc``.
        """
        emb = self.embed.position_encoding(0, xs.size(1))
        masks = ~make_pad_mask(
            lengths.type(torch.int64), xs.size(1)).unsqueeze(1)
        for enc in self.encoders[-1]:
            xs = enc(xs, emb, masks)
        xs = self.encoder_moe(xs, ys, masks, emb)
        # if output_proj:
        #     assert self.projector is not None
        #     proj, proj_lengths = self.projector(xs, lengths.type(torch.int64))
        #     return self.ctc[-1]._forward(xs), xs, lengths, proj, proj_lengths
        return self.ctc[-1]._forward(xs), xs, lengths

    def _forward_encoder_export_stable_proj(self, xs, lengths, ys):
        """Same as :meth:`_forward_encoder_export_stable` plus the projector.

        NOTE(review): relies on externally-attached ``self.encoder_moe``,
        ``self.ctc`` and ``self.projector``.
        """
        assert self.projector is not None
        emb = self.embed.position_encoding(0, xs.size(1))
        lengths = lengths.to(torch.int64)
        masks = ~make_pad_mask(lengths, xs.size(1)).unsqueeze(1)
        for enc in self.encoders[-1]:
            xs = enc(xs, emb, masks)
        xs = self.encoder_moe(xs, ys, masks, emb)
        proj, proj_lengths = self.projector(xs, lengths)
        return (self.ctc[-1]._forward(xs), xs, lengths.type(torch.int32),
                proj, proj_lengths.type(torch.int32))

    def num_causal_layers(self) -> int:
        """Total layer count of the shared stack plus branch-0 stack."""
        return sum(
            enc.num_layers() for enc in self.shared_encoder + self.encoders[0])

    def get_export_metadata(self):
        """Return (subsampling_rate, right_context, dim_meta, vocab_size).

        NOTE(review): ``self.vocab_size`` is never set in this class — must
        be attached externally.
        """
        dim_meta = dict(D=self.dim,
                        C=self.encoders[0][0].encoders[0].conv_module.lorder,
                        L=self.num_causal_layers())
        return (self.embed.subsampling_rate, self.embed.right_context,
                dim_meta, self.vocab_size)

    def recognize_new(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        decoding_chunk_size: int = -1,
        num_decoding_left_chunks: int = -1,
        requests: dict = {},  # NOTE(review): mutable default — tolerable only because it is never mutated here
        disable_logits: set = {},  # NOTE(review): default is an empty dict, not a set, and also mutable
    ) -> torch.Tensor:
        """
        Args:
            speech (torch.Tensor): (batch, max_len, feat_dim)
            speech_length (torch.Tensor): (batch, )
            decoding_chunk_size (int): decoding chunk for dynamic chunk
                trained model.
                <0: for decoding, use full chunk.
                >0: for decoding, use fixed chunk size as set.
                0: used for training, it's prohibited here
            requests: maps encoder index -> iterable of decoding modes
                (only 'greedy' is handled).
            disable_logits: vocabulary ids whose logits are forced to -inf.

        Returns:
            dict<str, torch.Tensor>:
                key is request (mode + encoder_id)
                value is result tensor (batch, max_result_len)

        NOTE(review): relies on externally-attached ``self.ctc``.
        """
        assert speech.shape[0] == speech_lengths.shape[0]
        assert decoding_chunk_size != 0
        result = dict()
        encodings, _, masks = self._forward_encoder(speech, speech_lengths,
                                                    decoding_chunk_size,
                                                    num_decoding_left_chunks)
        enc_lens = masks.sum((1, 2))
        for enc, modes in requests.items():
            enc %= len(encodings)
            logits = self.ctc[enc]._forward(encodings[enc])
            for dl in disable_logits:
                logits[:,:,dl] = -float('inf')
            if 'greedy' in modes:
                result[f'greedy{enc}'] = self._greedy(logits, masks)
        return result

    def _greedy(self, ctc_probs, encoder_mask) -> Tuple[List[List[int]], Any]:
        """Greedy CTC decoding.

        Args:
            ctc_probs: (B, T, V) per-frame CTC scores.
            encoder_mask: (B, 1, T) padding mask.
        Returns:
            hyps: per-utterance token lists with blanks/duplicates removed.
            scores: result of ``topk_prob.max(1)``.

        NOTE(review): ``self.eos`` is never set in this class — must be
        attached externally.
        """
        batch_size = ctc_probs.shape[0]
        # Let's assume B = batch_size
        maxlen = encoder_mask.size(2)  # T
        encoder_out_lens = encoder_mask.squeeze(1).sum(1)
        topk_prob, topk_index = ctc_probs.topk(1, dim=2)  # (B, T, 1)
        topk_index = topk_index.view(batch_size, maxlen)  # (B, T)
        mask = make_pad_mask(encoder_out_lens, maxlen)  # (B, T)
        # Padded frames are replaced by eos so they collapse away below.
        topk_index = topk_index.masked_fill_(mask, self.eos)  # (B, T)
        hyps = [hyp.tolist() for hyp in topk_index]
        scores = topk_prob.max(1)
        hyps = [remove_duplicates_and_blank(hyp) for hyp in hyps]
        return hyps, scores





def init_moe_comformer_with_FSQ(configs):
    """Build a ``MoeComformer_ssl_encoder_with_FSQ`` from a config dict.

    Expects ``configs`` to provide ``input_dim``, ``output_dim``,
    ``embedder_conf``, ``encoder_conf`` (a list of encoder-stack configs)
    and ``model_conf``; ``common_conf`` / ``encoder_common_conf`` are
    optional shared defaults merged into every encoder layer config.
    """
    from wenet.models.moe_comformer.embedder import init_embedder
    from wenet.models.moe_comformer.pure_encoder_v2r import ConformerEncoder
    from wenet.models.moe_comformer.merge_dicts import merge_dicts

    # 2-D convolution frontend implementing 6x temporal downsampling.
    embedder = init_embedder(configs['input_dim'], **configs['embedder_conf'])
    model_dim = embedder.pos_enc.d_model
    odim = configs['output_dim']  # read so a missing key still fails fast; unused below
    with_pos = configs['embedder_conf']['pos_enc_layer_type'] != 'no_pos'
    base_conf = configs.get('common_conf', {})
    enc_base_conf = configs.get('encoder_common_conf', {})

    def build_stack(stack_conf):
        # One ConformerEncoder per layer-group entry in the stack config.
        return [ConformerEncoder(
            model_dim, use_positional_embedding=with_pos,
            **merge_dicts(base_conf, enc_base_conf, layer_conf),
        ) for layer_conf in stack_conf]

    return MoeComformer_ssl_encoder_with_FSQ(
        embedder=embedder,
        encoders=[build_stack(stack) for stack in configs['encoder_conf']],
        **configs['model_conf'],
    )

if __name__ == '__main__':
    """Smoke test: build the encoder, tokenize random features and a real wav."""
    from gxl_ai_utils.utils import utils_file
    now = utils_file.do_get_now_time_by_second()
    test_config_path = "../../../examples/tencent_data/s0_fsq/conf/train_2025_9_10_xlgeng.yaml"
    configs = utils_file.load_dict_from_yaml(test_config_path)
    encoder = init_moe_comformer_with_FSQ(configs)
    print(encoder)
    fake_feats = torch.randn(2, 100, 80)
    fake_lens = torch.tensor([100, 93])
    # Fall back to CPU when no GPU is present instead of crashing on cuda:0.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    fake_feats = fake_feats.to(device)
    fake_lens = fake_lens.to(device)
    encoder.to(device)
    indies, seq_lens = encoder.tokenizer(fake_feats, fake_lens)
    print(indies)
    print(seq_lens)
    ckpt_path = "/mnt/apdcephfs_sgfd/share_303841515/Tealab/user/xuelonggeng/ckpt/fsq_from_stage1_step_40499_multi_node33_120W_stage2/step_17999_only_encoder.pt"
    encoder.load_state_dict(torch.load(ckpt_path, map_location=torch.device('cpu')))
    index, seq_len = encoder.tokenizer(fake_feats, fake_lens)
    print(index)
    print(seq_len)

    import torchaudio.compliance.kaldi as kaldi
    import torchaudio
    waveform, sr = torchaudio.load("../../../test/resources/aishell-BAC009S0724W0121.wav")  # sample rate is 16 kHz
    print(waveform.shape)
    print(sr)
    if sr != 16000:
        # Resample to the 16 kHz rate expected by the fbank frontend.
        waveform = torchaudio.transforms.Resample(sr, 16000)(waveform)
        sr = 16000  # update BEFORE printing so the new rate is reported
        print(waveform.shape)
        print(sr)
    if waveform.shape[0] != 1:
        # Keep only the first channel of multi-channel audio.
        waveform = waveform[0, :].unsqueeze(0)

    waveform = waveform * (1 << 15)  # scale to 16-bit PCM range expected by kaldi fbank
    # Only keep key, feat, label
    mat = kaldi.fbank(waveform,
                      num_mel_bins=80,
                      frame_length=25,
                      frame_shift=10,
                      dither=1.0,
                      energy_floor=0.0,
                      sample_frequency=sr,
                      window_type="povey")
    feats = mat.unsqueeze(0).to(device)
    print(feats.shape)
    feats_length = torch.tensor([feats.shape[1]]).to(device)
    print(feats_length)
    index, seq_len = encoder.tokenizer(feats, feats_length)
    print(index)
    print(seq_len)
