from collections import defaultdict
from typing import List, Optional, Tuple, Union, Any

import math
import logging
import torch

from wenet.models.moe_comformer.best_rq_ctc_ft_with_fsq import BestRQ_CTC_FT_withFSQ
from wenet.models.moe_comformer.pure_encoder_v2r import ChunkMasker, MixtureOfEncodings
from wenet.models.moe_comformer.common import remove_duplicates_and_blank
from wenet.models.moe_comformer.mask import make_pad_mask
from wenet.models.moe_comformer.moe_train_utils import get_global_loss_dict, reset_global_loss_dict


class ASRModel(torch.nn.Module):
    """CTC-based hybrid model: embedder -> shared encoder trunk -> two
    encoder branches, each scored by its own CTC head.

    ``encoders`` holds three groups: group 0 is the shared trunk,
    groups 1-2 are the two branches.  The second branch's output is
    refined by ``encoder_moe`` (MixtureOfEncodings) using the first
    branch's output before its CTC loss is computed
    (see ``_forward_encoder``).
    """
    def __init__(
        self,
        vocab_size: int,
        embedder: torch.nn.Module,
        encoders: List,
        ctc: List,
        ctc_weights: List[float],
        encoder_moe_conf: dict,
        chunk_masker_conf: dict,
        **kwargs,
    ):
        # Each CTC weight must be non-negative and together they may
        # contribute at most weight 1.0 to the total loss.
        assert (all([w >= 0.0 for w in ctc_weights]) and
                0.0 <= sum(ctc_weights) <= 1.0), ctc_weights
        assert len(ctc_weights) == 2
        assert len(ctc) == 2
        assert len(encoders) == 3
        super().__init__()
        # note that eos is the same as sos (equivalent ID)
        self.sos = vocab_size - 1
        self.eos = vocab_size - 1
        self.vocab_size = vocab_size

        self.embedder = embedder
        self.chunk_masker = ChunkMasker(**chunk_masker_conf)
        self.shared_encoder = torch.nn.ModuleList(encoders[0])
        # Branch encoders: one ModuleList per branch (groups 1..N).
        self.encoders = torch.nn.ModuleList(
            [torch.nn.ModuleList(encs) for encs in encoders[1:]])
        self.dim = embedder.pos_enc.d_model
        self.encoder_moe = MixtureOfEncodings(self.dim, **encoder_moe_conf)
        self.ctc = torch.nn.ModuleList(ctc)
        if self.ctc[0] is self.ctc[1]:
            logging.info('Use shared CTC: ctc[0] is the same as ctc[1].')
        self.ctc_weights = ctc_weights
        # Optional output projector (e.g. SpliceProjector) used by the
        # *_proj export paths.
        self.projector = kwargs.get('projector', None)

    def num_encoders(self):
        """Number of branch encoders (excludes the shared trunk)."""
        return len(self.encoders)

    def set_epoch(self, epoch):
        """Propagate the current epoch to every encoder layer and the MoE."""
        for enc in self.shared_encoder:
            enc.set_epoch(epoch)
        for encoder in self.encoders:
            for enc in encoder:
                enc.set_epoch(epoch)
        self.encoder_moe.set_epoch(epoch)

    def gather_stat_info(self, infos: list):
        """Collect statistics from every encoder layer into ``infos``."""
        for enc in self.shared_encoder:
            enc.gather_stat_info(infos)
        for encoder in self.encoders:
            for enc in encoder:
                enc.gather_stat_info(infos)

    def get_stat(self, infos: list):
        """Collect statistics from the mixture-of-encodings module."""
        self.encoder_moe.get_stat(infos)

    def get_debug_stat(self, infos: list, name=''):
        """Collect per-layer debug statistics, tagged with layer position."""
        for i, enc in enumerate(self.shared_encoder):
            enc.get_debug_stat(infos, f'shared_encoder{i}')
        for i, encoder in enumerate(self.encoders):
            for j, enc in enumerate(encoder):
                enc.get_debug_stat(infos, f'encoder{i}.{j}')

    def forward(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        cv: bool = False,
    ) -> Tuple[torch.Tensor, dict]:
        """Frontend + Encoder + CTC loss.

        Args:
            speech: (Batch, Length, ...)
            speech_lengths: (Batch, )
            text: (Batch, Length)
            text_lengths: (Batch,)
            cv: cross-validation flag, forwarded to the encoder.

        Returns:
            Tuple of (total weighted loss, dict mapping loss name to its
            scalar value).
        """
        assert text_lengths.dim() == 1, text_lengths.shape
        assert (speech.shape[0] == speech_lengths.shape[0] == text.shape[0] ==
                text_lengths.shape[0]), (speech.shape, speech_lengths.shape,
                                         text.shape, text_lengths.shape)
        # 1. Encoder (with embedder) and CTC loss.
        encodings, embeddings, masks = self._forward_encoder(
            speech, speech_lengths, cv=cv)
        lengths = masks.squeeze(1).sum(1)  # (B,)
        # Accumulate as a float tensor instead of an int tensor so the
        # result does not rely on implicit int->float promotion.
        loss = torch.tensor(0.0, device=embeddings.device)
        loss_misc = {}
        for i, xs in enumerate(encodings):
            loss_ctc = self.ctc[i](xs, lengths, text, text_lengths)
            loss = loss + self.ctc_weights[i] * loss_ctc
            loss_misc[f'ctc{i}'] = loss_ctc.item()
        # Auxiliary losses registered globally by submodules during forward.
        for k, v in get_global_loss_dict().items():
            avg = v.avg()  # compute once, used for both sum and logging
            loss = loss + avg
            loss_misc[k] = avg.item()
        reset_global_loss_dict()
        return loss, loss_misc

    def _forward_encoder(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        decoding_chunk_size: Union[int, list, torch.Tensor] = -1,
        num_decoding_left_chunks: int = -1,
        use_chunk_mask: bool = None,
        cv: bool = False,
    ) -> Tuple[List[torch.Tensor], torch.Tensor, torch.Tensor]:
        """Run embedder, shared trunk, both branches and the MoE combiner.

        Returns:
            (list of per-branch encodings, positional embeddings,
             padding masks of shape (B, 1, T)).
        """
        # Exported graphs may pass the chunk size as a length-1 container.
        if hasattr(decoding_chunk_size, '__len__') and len(decoding_chunk_size) > 1:
            decoding_chunk_size = decoding_chunk_size[0]
        masks = ~make_pad_mask(speech_lengths, speech.size(1)).unsqueeze(1)  # (B, 1, T)
        xs, embeddings, masks = self.embedder(speech, masks)
        chunk_masks = self.chunk_masker(masks, decoding_chunk_size,
                                        num_decoding_left_chunks)
        for enc in self.shared_encoder:
            xs = enc(xs, embeddings, masks, chunk_masks, use_chunk_mask)
        encodings = []
        for i, encoder in enumerate(self.encoders):
            xs_d = xs
            for enc in encoder:
                xs_d = enc(xs_d, embeddings, masks, chunk_masks, use_chunk_mask)
            encodings.append(xs_d)
        # Branch 1 is refined with branch 0's output through the MoE.
        encodings[1] = self.encoder_moe(encodings[1], encodings[0],
                                        masks, embeddings)
        return encodings, embeddings, masks

    def _forward_encoder_export_offline(self, xs, lengths, chunk_size=None):
        """Export entry point: offline (full-context) encoding + CTC logits."""
        if chunk_size is None:
            chunk_size = torch.tensor([-1], device=xs.device)
        elif type(chunk_size) == torch.Tensor:
            chunk_size = chunk_size.type(torch.int64)
        xs_list, emb, masks = self._forward_encoder(xs, lengths, chunk_size,
                                                    use_chunk_mask=True)
        logits = self.ctc[-1]._forward(xs_list[-1])
        out_lens = masks.sum((1, 2)).type(torch.int32)
        return logits, xs_list[-1], out_lens

    def _forward_encoder_export_offline_proj(self, xs, lengths,
                                             chunk_size=None):
        """Offline export path that additionally applies ``self.projector``."""
        assert self.projector is not None
        if chunk_size is None:
            chunk_size = torch.tensor([-1], device=xs.device)
        elif type(chunk_size) == torch.Tensor:
            chunk_size = chunk_size.type(torch.int64)
        xs_list, emb, masks = self._forward_encoder(xs, lengths, chunk_size,
                                                    use_chunk_mask=True)
        logits = self.ctc[-1]._forward(xs_list[-1])
        out_lens = masks.sum((1, 2)).type(torch.int32)
        proj, proj_lengths = self.projector(xs_list[-1], out_lens)
        return logits, xs_list[-1], out_lens, proj, proj_lengths

    def _forward_encoder_export_incremental(
        self, xs, lengths, att_caches, cache_lens, cnn_caches):
        """Streaming export path over the causal layers (trunk + branch 0).

        Consumes and re-emits flattened attention / convolution caches for
        all causal layers; returns branch-0 CTC logits plus updated caches.
        """
        masks = ~make_pad_mask(
            lengths.type(torch.int64), xs.size(1)).unsqueeze(1)
        cache_lens = cache_lens.type(torch.int64)
        xs, _, masks = self.embedder(xs, masks, cache_lens)
        emb = self.embedder.position_encoding(
             cache_lens - att_caches.size(1), att_caches.size(1) + xs.size(1))
        n = self.num_causal_layers()
        d = self.dim
        att_caches = att_caches.reshape(xs.size(0), -1, n, d)
        cnn_caches = cnn_caches.reshape(xs.size(0), n, -1)
        i = 0
        r_att_list = []
        r_cnn_list = []
        ys = []
        for enc_list in [self.shared_encoder, self.encoders[0]]:
            for enc in enc_list:
                l = enc.num_layers()
                xs, att, cnn = enc.forward_incremental(
                    xs, emb, masks, att_caches[:, :, i: i + l],
                    cache_lens, cnn_caches[:, i: i + l])
                r_att_list += att
                r_cnn_list += cnn
                i += l
            ys.append(xs)
        ylens = masks.sum((1, 2)).type(torch.int32)
        return (self.ctc[0]._forward(ys[-1]), ys[0], ylens, ys[-1],
                torch.cat(r_att_list, 2), torch.cat(r_cnn_list, 1))

    def _forward_encoder_export_stable(self, xs, lengths, ys):
        """Export path for the non-causal branch plus MoE combination.

        ``xs`` is the shared-trunk output, ``ys`` the causal branch output.
        """
        emb = self.embedder.position_encoding(0, xs.size(1))
        masks = ~make_pad_mask(
            lengths.type(torch.int64), xs.size(1)).unsqueeze(1)
        for enc in self.encoders[-1]:
            xs = enc(xs, emb, masks)
        xs = self.encoder_moe(xs, ys, masks, emb)
        return self.ctc[-1]._forward(xs), xs, lengths

    def _forward_encoder_export_stable_proj(self, xs, lengths, ys):
        """Same as ``_forward_encoder_export_stable`` + projector output."""
        assert self.projector is not None
        emb = self.embedder.position_encoding(0, xs.size(1))
        lengths = lengths.to(torch.int64)
        masks = ~make_pad_mask(lengths, xs.size(1)).unsqueeze(1)
        for enc in self.encoders[-1]:
            xs = enc(xs, emb, masks)
        xs = self.encoder_moe(xs, ys, masks, emb)
        proj, proj_lengths = self.projector(xs, lengths)
        return (self.ctc[-1]._forward(xs), xs, lengths.type(torch.int32),
                proj, proj_lengths.type(torch.int32))

    def num_causal_layers(self):
        """Total layer count of the streaming path (trunk + branch 0)."""
        return sum(
            enc.num_layers() for enc in self.shared_encoder + self.encoders[0])

    def get_export_metadata(self):
        """Shape/config metadata needed by the export tooling."""
        dim_meta = dict(D=self.dim,
                        C=self.encoders[0][0].encoders[0].conv_module.lorder,
                        L=self.num_causal_layers())
        return (self.embedder.subsampling_rate, self.embedder.right_context,
                dim_meta, self.vocab_size)

    def recognize_new(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        decoding_chunk_size: int = -1,
        num_decoding_left_chunks: int = -1,
        requests: Optional[dict] = None,
        disable_logits: Optional[set] = None,
    ) -> dict:
        """
        Args:
            speech (torch.Tensor): (batch, max_len, feat_dim)
            speech_length (torch.Tensor): (batch, )
            decoding_chunk_size (int): decoding chunk for dynamic chunk
                trained model.
                <0: for decoding, use full chunk.
                >0: for decoding, use fixed chunk size as set.
                0: used for training, it's prohibited here
            requests: maps encoder index -> iterable of decode modes
                (only 'greedy' is supported here).
            disable_logits: vocabulary ids whose logits are forced to -inf.

        Returns:
            dict<str, result>:
                key is request (mode + encoder_id)
                value is the (hyps, scores) pair from ``_greedy``
        """
        # Avoid mutable default arguments (shared across calls).
        requests = {} if requests is None else requests
        disable_logits = set() if disable_logits is None else disable_logits
        assert speech.shape[0] == speech_lengths.shape[0]
        assert decoding_chunk_size != 0
        result = dict()
        encodings, _, masks = self._forward_encoder(speech, speech_lengths,
                                                    decoding_chunk_size,
                                                    num_decoding_left_chunks)
        enc_lens = masks.sum((1, 2))
        for enc, modes in requests.items():
            enc %= len(encodings)  # allow negative / wrapped encoder ids
            logits = self.ctc[enc]._forward(encodings[enc])
            for dl in disable_logits:
                logits[:, :, dl] = -float('inf')
            if 'greedy' in modes:
                result[f'greedy{enc}'] = self._greedy(logits, masks)
        return result

    def _greedy(self, ctc_probs, encoder_mask) -> Tuple[List[List[int]], Any]:
        """CTC greedy decoding.

        Args:
            ctc_probs: (B, T, V) frame-level CTC scores.
            encoder_mask: (B, 1, T) padding mask.

        Returns:
            (hyps, scores): de-duplicated, blank-stripped token id lists and
            the (values, indices) namedtuple from ``topk_prob.max(1)``.
        """
        batch_size = ctc_probs.shape[0]
        # Let's assume B = batch_size
        maxlen = encoder_mask.size(2)  # T
        encoder_out_lens = encoder_mask.squeeze(1).sum(1)
        topk_prob, topk_index = ctc_probs.topk(1, dim=2)  # (B, T, 1)
        topk_index = topk_index.view(batch_size, maxlen)  # (B, T)
        mask = make_pad_mask(encoder_out_lens, maxlen)  # (B, T)
        # Padded frames are set to eos so they are dropped by the
        # duplicate/blank removal below.
        topk_index = topk_index.masked_fill_(mask, self.eos)  # (B, T)
        hyps = [hyp.tolist() for hyp in topk_index]
        scores = topk_prob.max(1)
        hyps = [remove_duplicates_and_blank(hyp) for hyp in hyps]
        return hyps, scores


class MoeComformer_ssl_encoder(torch.nn.Module):
    """Encoder-only variant (no CTC heads) used as an SSL feature extractor.

    NOTE(review): this class appears copy-pasted from ``ASRModel``.  Several
    methods below still reference attributes this class never defines
    (``self.ctc``, ``self.projector``, ``self.encoder_moe``,
    ``self.vocab_size``, ``self.eos``) and would raise ``AttributeError``
    if called.  Only ``forward`` and the ``_forward_encoder*`` (non-export)
    methods are known-safe here.
    """
    def __init__(
        self,
        embedder: torch.nn.Module,
        encoders: List,
        chunk_masker_conf: dict,
        **kwargs,
    ):
        super().__init__()

        self.embed = embedder
        self.chunk_masker = ChunkMasker(**chunk_masker_conf)
        self.shared_encoder = torch.nn.ModuleList(encoders[0])
        # Branch encoders: one ModuleList per branch (groups 1..N).
        self.encoders = torch.nn.ModuleList(
            [torch.nn.ModuleList(encs) for encs in encoders[1:]])
        self.dim = embedder.pos_enc.d_model

    def output_size(self):
        """Feature dimension of the produced encodings."""
        return self.dim

    def num_encoders(self):
        """Number of branch encoders (excludes the shared trunk)."""
        return len(self.encoders)

    def set_epoch(self, epoch):
        """Propagate the current epoch to every encoder layer."""
        for enc in self.shared_encoder:
            enc.set_epoch(epoch)
        for encoder in self.encoders:
            for enc in encoder:
                enc.set_epoch(epoch)
        # Bug fix: this class defines no ``encoder_moe``; the original
        # unconditional call raised AttributeError.  Guard it so a subclass
        # that adds one still gets the epoch propagated.
        moe = getattr(self, 'encoder_moe', None)
        if moe is not None:
            moe.set_epoch(epoch)

    def gather_stat_info(self, infos: list):
        """Collect statistics from every encoder layer into ``infos``."""
        for enc in self.shared_encoder:
            enc.gather_stat_info(infos)
        for encoder in self.encoders:
            for enc in encoder:
                enc.gather_stat_info(infos)

    def get_stat(self, infos: list):
        """Collect MoE statistics if a subclass provides ``encoder_moe``.

        Bug fix: the original unconditional access raised AttributeError
        because this class never defines ``encoder_moe``.
        """
        moe = getattr(self, 'encoder_moe', None)
        if moe is not None:
            moe.get_stat(infos)

    def get_debug_stat(self, infos: list, name=''):
        """Collect per-layer debug statistics, tagged with layer position."""
        for i, enc in enumerate(self.shared_encoder):
            enc.get_debug_stat(infos, f'shared_encoder{i}')
        for i, encoder in enumerate(self.encoders):
            for j, enc in enumerate(encoder):
                enc.get_debug_stat(infos, f'encoder{i}.{j}')

    def forward(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        input_mask: torch.Tensor = None,
    ) -> Tuple[Optional[torch.Tensor],
               Optional[torch.Tensor]]:
        """Frontend + encoder; returns features only (no loss).

        Args:
            speech: (Batch, Length, ...)
            speech_lengths: (Batch, )
            input_mask: optional (Batch, Length') mask that overrides the
                mask derived from ``speech_lengths`` after subsampling.
        Outputs:
            encodings: (Batch, Length, Dim)
            mask: (Batch, 1, Length)
        """
        encodings, embeddings, masks = self._forward_encoder_input_mask(
            speech, speech_lengths, input_mask)
        return encodings, masks


    def _forward_encoder_input_mask(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        masks: torch.Tensor = None,
        decoding_chunk_size: Union[int, list, torch.Tensor] = -1,
        num_decoding_left_chunks: int = -1,
        use_chunk_mask: bool = None,
    ) -> tuple[Any, Any, Any]:
        """Like ``_forward_encoder`` but an externally supplied mask (e.g.
        an SSL masking pattern) can replace the embedder's padding mask.

        Returns (first-branch encodings, positional embeddings, mask).
        """
        # Exported graphs may pass the chunk size as a length-1 container.
        if hasattr(decoding_chunk_size, '__len__') and len(decoding_chunk_size) > 1:
            decoding_chunk_size = decoding_chunk_size[0]
        masks_for_embed = ~make_pad_mask(speech_lengths, speech.size(1)).unsqueeze(1)  # (B, 1, T)
        xs, embeddings, masks_back_from_embed = self.embed(speech, masks_for_embed)
        if masks is not None:
            masks = masks.unsqueeze(1)  # (B, T') -> (B, 1, T')
        else:
            masks = masks_back_from_embed
        chunk_masks = self.chunk_masker(masks, decoding_chunk_size,
                                        num_decoding_left_chunks)
        for enc in self.shared_encoder:
            xs = enc(xs, embeddings, masks, chunk_masks, use_chunk_mask)
        encodings = []
        for i, encoder in enumerate(self.encoders):
            xs_d = xs
            for enc in encoder:
                xs_d = enc(xs_d, embeddings, masks, chunk_masks, use_chunk_mask)
            encodings.append(xs_d)
        # Only the first branch's output is used in this SSL variant.
        encodings = encodings[0]
        return encodings, embeddings, masks

    def _forward_encoder(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        decoding_chunk_size: Union[int, list, torch.Tensor] = -1,
        num_decoding_left_chunks: int = -1,
        use_chunk_mask: bool = None,
    ) -> tuple[Any, Any, Any]:
        """Run embedder, shared trunk and branch encoders.

        Returns (first-branch encodings, positional embeddings, mask).
        """
        if hasattr(decoding_chunk_size, '__len__') and len(decoding_chunk_size) > 1:
            decoding_chunk_size = decoding_chunk_size[0]
        masks = ~make_pad_mask(speech_lengths, speech.size(1)).unsqueeze(1)  # (B, 1, T)
        xs, embeddings, masks = self.embed(speech, masks)
        chunk_masks = self.chunk_masker(masks, decoding_chunk_size,
                                        num_decoding_left_chunks)
        for enc in self.shared_encoder:
            xs = enc(xs, embeddings, masks, chunk_masks, use_chunk_mask)
        encodings = []
        for i, encoder in enumerate(self.encoders):
            xs_d = xs
            for enc in encoder:
                xs_d = enc(xs_d, embeddings, masks, chunk_masks, use_chunk_mask)
            encodings.append(xs_d)
        encodings = encodings[0]
        return encodings, embeddings, masks

    # NOTE(review): the export/recognize methods below were copied from
    # ASRModel; they reference ``self.ctc`` / ``self.projector`` /
    # ``self.encoder_moe`` / ``self.vocab_size`` / ``self.eos`` which this
    # class never defines, and they index the tensor returned by this
    # class's ``_forward_encoder`` as if it were a list.  They will raise
    # if called and should be removed or rewritten before use.

    def _forward_encoder_export_offline(self, xs, lengths, chunk_size=None):
        if chunk_size is None:
            chunk_size = torch.tensor([-1], device=xs.device)
        elif type(chunk_size) == torch.Tensor:
            chunk_size = chunk_size.type(torch.int64)
        xs_list, emb, masks = self._forward_encoder(xs, lengths, chunk_size,
                                                    use_chunk_mask=True)
        logits = self.ctc[-1]._forward(xs_list[-1])
        out_lens = masks.sum((1, 2)).type(torch.int32)
        return logits, xs_list[-1], out_lens

    def _forward_encoder_export_offline_proj(self, xs, lengths,
                                             chunk_size=None):
        assert self.projector is not None
        if chunk_size is None:
            chunk_size = torch.tensor([-1], device=xs.device)
        elif type(chunk_size) == torch.Tensor:
            chunk_size = chunk_size.type(torch.int64)
        xs_list, emb, masks = self._forward_encoder(xs, lengths, chunk_size,
                                                    use_chunk_mask=True)
        logits = self.ctc[-1]._forward(xs_list[-1])
        out_lens = masks.sum((1, 2)).type(torch.int32)
        proj, proj_lengths = self.projector(xs_list[-1], out_lens)
        return logits, xs_list[-1], out_lens, proj, proj_lengths

    def _forward_encoder_export_incremental(
        self, xs, lengths, att_caches, cache_lens, cnn_caches):
        masks = ~make_pad_mask(
            lengths.type(torch.int64), xs.size(1)).unsqueeze(1)
        cache_lens = cache_lens.type(torch.int64)
        xs, _, masks = self.embed(xs, masks, cache_lens)
        emb = self.embed.position_encoding(
             cache_lens - att_caches.size(1), att_caches.size(1) + xs.size(1))
        n = self.num_causal_layers()
        d = self.dim
        att_caches = att_caches.reshape(xs.size(0), -1, n, d)
        cnn_caches = cnn_caches.reshape(xs.size(0), n, -1)
        i = 0
        r_att_list = []
        r_cnn_list = []
        ys = []
        for enc_list in [self.shared_encoder, self.encoders[0]]:
            for enc in enc_list:
                l = enc.num_layers()
                xs, att, cnn = enc.forward_incremental(
                    xs, emb, masks, att_caches[:, :, i: i + l],
                    cache_lens, cnn_caches[:, i: i + l])
                r_att_list += att
                r_cnn_list += cnn
                i += l
            ys.append(xs)
        ylens = masks.sum((1, 2)).type(torch.int32)
        return (self.ctc[0]._forward(ys[-1]), ys[0], ylens, ys[-1],
                torch.cat(r_att_list, 2), torch.cat(r_cnn_list, 1))

    def _forward_encoder_export_stable(self, xs, lengths, ys):
        emb = self.embed.position_encoding(0, xs.size(1))
        masks = ~make_pad_mask(
            lengths.type(torch.int64), xs.size(1)).unsqueeze(1)
        for enc in self.encoders[-1]:
            xs = enc(xs, emb, masks)
        xs = self.encoder_moe(xs, ys, masks, emb)
        return self.ctc[-1]._forward(xs), xs, lengths

    def _forward_encoder_export_stable_proj(self, xs, lengths, ys):
        assert self.projector is not None
        emb = self.embed.position_encoding(0, xs.size(1))
        lengths = lengths.to(torch.int64)
        masks = ~make_pad_mask(lengths, xs.size(1)).unsqueeze(1)
        for enc in self.encoders[-1]:
            xs = enc(xs, emb, masks)
        xs = self.encoder_moe(xs, ys, masks, emb)
        proj, proj_lengths = self.projector(xs, lengths)
        return (self.ctc[-1]._forward(xs), xs, lengths.type(torch.int32),
                proj, proj_lengths.type(torch.int32))

    def num_causal_layers(self):
        """Total layer count of the streaming path (trunk + branch 0)."""
        return sum(
            enc.num_layers() for enc in self.shared_encoder + self.encoders[0])

    def get_export_metadata(self):
        # NOTE(review): references ``self.vocab_size`` which this class
        # never sets -- will raise AttributeError if called.
        dim_meta = dict(D=self.dim,
                        C=self.encoders[0][0].encoders[0].conv_module.lorder,
                        L=self.num_causal_layers())
        return (self.embed.subsampling_rate, self.embed.right_context,
                dim_meta, self.vocab_size)

    def recognize_new(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        decoding_chunk_size: int = -1,
        num_decoding_left_chunks: int = -1,
        requests: Optional[dict] = None,
        disable_logits: Optional[set] = None,
    ) -> dict:
        """
        Args:
            speech (torch.Tensor): (batch, max_len, feat_dim)
            speech_length (torch.Tensor): (batch, )
            decoding_chunk_size (int): decoding chunk for dynamic chunk
                trained model.
                <0: for decoding, use full chunk.
                >0: for decoding, use fixed chunk size as set.
                0: used for training, it's prohibited here

        Returns:
            dict<str, result>:
                key is request (mode + encoder_id)
                value is result

        NOTE(review): relies on the missing ``self.ctc`` attribute.
        """
        # Avoid mutable default arguments (shared across calls).
        requests = {} if requests is None else requests
        disable_logits = set() if disable_logits is None else disable_logits
        assert speech.shape[0] == speech_lengths.shape[0]
        assert decoding_chunk_size != 0
        result = dict()
        encodings, _, masks = self._forward_encoder(speech, speech_lengths,
                                                    decoding_chunk_size,
                                                    num_decoding_left_chunks)
        enc_lens = masks.sum((1, 2))
        for enc, modes in requests.items():
            enc %= len(encodings)
            logits = self.ctc[enc]._forward(encodings[enc])
            for dl in disable_logits:
                logits[:, :, dl] = -float('inf')
            if 'greedy' in modes:
                result[f'greedy{enc}'] = self._greedy(logits, masks)
        return result

    def _greedy(self, ctc_probs, encoder_mask) -> Tuple[List[List[int]], Any]:
        """CTC greedy decoding.

        NOTE(review): uses ``self.eos`` which this class never defines --
        will raise AttributeError if called.
        """
        batch_size = ctc_probs.shape[0]
        # Let's assume B = batch_size
        maxlen = encoder_mask.size(2)  # T
        encoder_out_lens = encoder_mask.squeeze(1).sum(1)
        topk_prob, topk_index = ctc_probs.topk(1, dim=2)  # (B, T, 1)
        topk_index = topk_index.view(batch_size, maxlen)  # (B, T)
        mask = make_pad_mask(encoder_out_lens, maxlen)  # (B, T)
        topk_index = topk_index.masked_fill_(mask, self.eos)  # (B, T)
        hyps = [hyp.tolist() for hyp in topk_index]
        scores = topk_prob.max(1)
        hyps = [remove_duplicates_and_blank(hyp) for hyp in hyps]
        return hyps, scores

from vector_quantize_pytorch import FSQ

class MoeComformer_ssl_encoder_with_FSQ(torch.nn.Module):
    """SSL encoder variant with a Finite Scalar Quantization bottleneck
    between the shared trunk and the branch encoders.

    NOTE(review): as in ``MoeComformer_ssl_encoder``, several methods below
    are copy-paste residue from ``ASRModel`` and reference attributes this
    class never defines (``self.ctc``, ``self.projector``,
    ``self.encoder_moe``, ``self.vocab_size``, ``self.eos``); they will
    raise ``AttributeError`` if called.
    """
    def __init__(
        self,
        embedder: torch.nn.Module,
        encoders: List,
        chunk_masker_conf: dict,
        **kwargs,
    ):
        super().__init__()

        self.embed = embedder
        self.chunk_masker = ChunkMasker(**chunk_masker_conf)
        self.shared_encoder = torch.nn.ModuleList(encoders[0])
        # Branch encoders: one ModuleList per branch (groups 1..N).
        self.encoders = torch.nn.ModuleList(
            [torch.nn.ModuleList(encs) for encs in encoders[1:]])
        self.dim = embedder.pos_enc.d_model
        # FSQ internally projects the ``dim``-dimensional features down to
        # len(levels) dimensions before quantizing, then projects back.
        # (The duplicate in-function FSQ import was removed; the
        # module-level import already provides it.)
        self.fsq = FSQ(
            levels=[3, 3, 3, 3, 3, 3, 3, 3],
            dim=int(self.dim),
            # If ``levels`` changes, re-verify the effect of this flag on
            # the quantized values (before the post-projection).
            preserve_symmetry=True,
        )

    def output_size(self):
        """Feature dimension of the produced encodings."""
        return self.dim

    def num_encoders(self):
        """Number of branch encoders (excludes the shared trunk)."""
        return len(self.encoders)

    def set_epoch(self, epoch):
        """Propagate the current epoch to every encoder layer."""
        for enc in self.shared_encoder:
            enc.set_epoch(epoch)
        for encoder in self.encoders:
            for enc in encoder:
                enc.set_epoch(epoch)
        # Bug fix: this class defines no ``encoder_moe``; the original
        # unconditional call raised AttributeError.  Guard it so a subclass
        # that adds one still gets the epoch propagated.
        moe = getattr(self, 'encoder_moe', None)
        if moe is not None:
            moe.set_epoch(epoch)

    def gather_stat_info(self, infos: list):
        """Collect statistics from every encoder layer into ``infos``."""
        for enc in self.shared_encoder:
            enc.gather_stat_info(infos)
        for encoder in self.encoders:
            for enc in encoder:
                enc.gather_stat_info(infos)

    def get_stat(self, infos: list):
        """Collect MoE statistics if a subclass provides ``encoder_moe``.

        Bug fix: the original unconditional access raised AttributeError
        because this class never defines ``encoder_moe``.
        """
        moe = getattr(self, 'encoder_moe', None)
        if moe is not None:
            moe.get_stat(infos)

    def get_debug_stat(self, infos: list, name=''):
        """Collect per-layer debug statistics, tagged with layer position."""
        for i, enc in enumerate(self.shared_encoder):
            enc.get_debug_stat(infos, f'shared_encoder{i}')
        for i, encoder in enumerate(self.encoders):
            for j, enc in enumerate(encoder):
                enc.get_debug_stat(infos, f'encoder{i}.{j}')

    def forward(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        input_mask: torch.Tensor = None,
    ) -> Tuple[Optional[torch.Tensor],
               Optional[torch.Tensor]]:
        """Frontend + encoder with FSQ bottleneck; returns features only.

        Args:
            speech: (Batch, Length, ...)
            speech_lengths: (Batch, )
            input_mask: optional (Batch, Length') mask that overrides the
                mask derived from ``speech_lengths`` after subsampling.
        Outputs:
            encodings: (Batch, Length, Dim)
            mask: (Batch, 1, Length)
        """
        encodings, embeddings, masks = self._forward_encoder_input_mask(
            speech, speech_lengths, input_mask)
        return encodings, masks


    def _forward_encoder_input_mask(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        masks: torch.Tensor = None,
        decoding_chunk_size: Union[int, list, torch.Tensor] = -1,
        num_decoding_left_chunks: int = -1,
        use_chunk_mask: bool = None,
    ) -> tuple[Any, Any, Any]:
        """Encode with an optional external mask; the shared-trunk output
        is FSQ-quantized before entering the branch encoders.

        Returns (first-branch encodings, positional embeddings, mask).
        """
        # Exported graphs may pass the chunk size as a length-1 container.
        if hasattr(decoding_chunk_size, '__len__') and len(decoding_chunk_size) > 1:
            decoding_chunk_size = decoding_chunk_size[0]
        masks_for_embed = ~make_pad_mask(speech_lengths, speech.size(1)).unsqueeze(1)  # (B, 1, T)
        xs, embeddings, masks_back_from_embed = self.embed(speech, masks_for_embed)
        if masks is not None:
            masks = masks.unsqueeze(1)  # (B, T') -> (B, 1, T')
        else:
            masks = masks_back_from_embed
        chunk_masks = self.chunk_masker(masks, decoding_chunk_size,
                                        num_decoding_left_chunks)
        for enc in self.shared_encoder:
            xs = enc(xs, embeddings, masks, chunk_masks, use_chunk_mask)
        encodings = []
        # Quantize the trunk output; branches consume the quantized values.
        quantize, indices = self.fsq(xs)
        for i, encoder in enumerate(self.encoders):
            xs_d = quantize
            for enc in encoder:
                xs_d = enc(xs_d, embeddings, masks, chunk_masks, use_chunk_mask)
            encodings.append(xs_d)
        # Only the first branch's output is used in this SSL variant.
        encodings = encodings[0]
        return encodings, embeddings, masks

    def _forward_encoder(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        decoding_chunk_size: Union[int, list, torch.Tensor] = -1,
        num_decoding_left_chunks: int = -1,
        use_chunk_mask: bool = None,
    ) -> tuple[Any, Any, Any]:
        """Run embedder, shared trunk and branch encoders.

        NOTE(review): unlike ``_forward_encoder_input_mask``, this path does
        NOT apply the FSQ bottleneck -- confirm whether that is intended.

        Returns (first-branch encodings, positional embeddings, mask).
        """
        if hasattr(decoding_chunk_size, '__len__') and len(decoding_chunk_size) > 1:
            decoding_chunk_size = decoding_chunk_size[0]
        masks = ~make_pad_mask(speech_lengths, speech.size(1)).unsqueeze(1)  # (B, 1, T)
        xs, embeddings, masks = self.embed(speech, masks)
        chunk_masks = self.chunk_masker(masks, decoding_chunk_size,
                                        num_decoding_left_chunks)
        for enc in self.shared_encoder:
            xs = enc(xs, embeddings, masks, chunk_masks, use_chunk_mask)
        encodings = []
        for i, encoder in enumerate(self.encoders):
            xs_d = xs
            for enc in encoder:
                xs_d = enc(xs_d, embeddings, masks, chunk_masks, use_chunk_mask)
            encodings.append(xs_d)
        encodings = encodings[0]
        return encodings, embeddings, masks

    # NOTE(review): the export/recognize methods below were copied from
    # ASRModel; they reference ``self.ctc`` / ``self.projector`` /
    # ``self.encoder_moe`` / ``self.vocab_size`` / ``self.eos`` which this
    # class never defines, and they index the tensor returned by this
    # class's ``_forward_encoder`` as if it were a list.  They will raise
    # if called and should be removed or rewritten before use.

    def _forward_encoder_export_offline(self, xs, lengths, chunk_size=None):
        if chunk_size is None:
            chunk_size = torch.tensor([-1], device=xs.device)
        elif type(chunk_size) == torch.Tensor:
            chunk_size = chunk_size.type(torch.int64)
        xs_list, emb, masks = self._forward_encoder(xs, lengths, chunk_size,
                                                    use_chunk_mask=True)
        logits = self.ctc[-1]._forward(xs_list[-1])
        out_lens = masks.sum((1, 2)).type(torch.int32)
        return logits, xs_list[-1], out_lens

    def _forward_encoder_export_offline_proj(self, xs, lengths,
                                             chunk_size=None):
        assert self.projector is not None
        if chunk_size is None:
            chunk_size = torch.tensor([-1], device=xs.device)
        elif type(chunk_size) == torch.Tensor:
            chunk_size = chunk_size.type(torch.int64)
        xs_list, emb, masks = self._forward_encoder(xs, lengths, chunk_size,
                                                    use_chunk_mask=True)
        logits = self.ctc[-1]._forward(xs_list[-1])
        out_lens = masks.sum((1, 2)).type(torch.int32)
        proj, proj_lengths = self.projector(xs_list[-1], out_lens)
        return logits, xs_list[-1], out_lens, proj, proj_lengths

    def _forward_encoder_export_incremental(
        self, xs, lengths, att_caches, cache_lens, cnn_caches):
        masks = ~make_pad_mask(
            lengths.type(torch.int64), xs.size(1)).unsqueeze(1)
        cache_lens = cache_lens.type(torch.int64)
        xs, _, masks = self.embed(xs, masks, cache_lens)
        emb = self.embed.position_encoding(
             cache_lens - att_caches.size(1), att_caches.size(1) + xs.size(1))
        n = self.num_causal_layers()
        d = self.dim
        att_caches = att_caches.reshape(xs.size(0), -1, n, d)
        cnn_caches = cnn_caches.reshape(xs.size(0), n, -1)
        i = 0
        r_att_list = []
        r_cnn_list = []
        ys = []
        for enc_list in [self.shared_encoder, self.encoders[0]]:
            for enc in enc_list:
                l = enc.num_layers()
                xs, att, cnn = enc.forward_incremental(
                    xs, emb, masks, att_caches[:, :, i: i + l],
                    cache_lens, cnn_caches[:, i: i + l])
                r_att_list += att
                r_cnn_list += cnn
                i += l
            ys.append(xs)
        ylens = masks.sum((1, 2)).type(torch.int32)
        return (self.ctc[0]._forward(ys[-1]), ys[0], ylens, ys[-1],
                torch.cat(r_att_list, 2), torch.cat(r_cnn_list, 1))

    def _forward_encoder_export_stable(self, xs, lengths, ys):
        emb = self.embed.position_encoding(0, xs.size(1))
        masks = ~make_pad_mask(
            lengths.type(torch.int64), xs.size(1)).unsqueeze(1)
        for enc in self.encoders[-1]:
            xs = enc(xs, emb, masks)
        xs = self.encoder_moe(xs, ys, masks, emb)
        return self.ctc[-1]._forward(xs), xs, lengths

    def _forward_encoder_export_stable_proj(self, xs, lengths, ys):
        assert self.projector is not None
        emb = self.embed.position_encoding(0, xs.size(1))
        lengths = lengths.to(torch.int64)
        masks = ~make_pad_mask(lengths, xs.size(1)).unsqueeze(1)
        for enc in self.encoders[-1]:
            xs = enc(xs, emb, masks)
        xs = self.encoder_moe(xs, ys, masks, emb)
        proj, proj_lengths = self.projector(xs, lengths)
        return (self.ctc[-1]._forward(xs), xs, lengths.type(torch.int32),
                proj, proj_lengths.type(torch.int32))

    def num_causal_layers(self):
        """Total layer count of the streaming path (trunk + branch 0)."""
        return sum(
            enc.num_layers() for enc in self.shared_encoder + self.encoders[0])

    def get_export_metadata(self):
        # NOTE(review): references ``self.vocab_size`` which this class
        # never sets -- will raise AttributeError if called.
        dim_meta = dict(D=self.dim,
                        C=self.encoders[0][0].encoders[0].conv_module.lorder,
                        L=self.num_causal_layers())
        return (self.embed.subsampling_rate, self.embed.right_context,
                dim_meta, self.vocab_size)

    def recognize_new(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        decoding_chunk_size: int = -1,
        num_decoding_left_chunks: int = -1,
        requests: Optional[dict] = None,
        disable_logits: Optional[set] = None,
    ) -> dict:
        """
        Args:
            speech (torch.Tensor): (batch, max_len, feat_dim)
            speech_length (torch.Tensor): (batch, )
            decoding_chunk_size (int): decoding chunk for dynamic chunk
                trained model.
                <0: for decoding, use full chunk.
                >0: for decoding, use fixed chunk size as set.
                0: used for training, it's prohibited here

        Returns:
            dict<str, result>:
                key is request (mode + encoder_id)
                value is result

        NOTE(review): relies on the missing ``self.ctc`` attribute.
        """
        # Avoid mutable default arguments (shared across calls).
        requests = {} if requests is None else requests
        disable_logits = set() if disable_logits is None else disable_logits
        assert speech.shape[0] == speech_lengths.shape[0]
        assert decoding_chunk_size != 0
        result = dict()
        encodings, _, masks = self._forward_encoder(speech, speech_lengths,
                                                    decoding_chunk_size,
                                                    num_decoding_left_chunks)
        enc_lens = masks.sum((1, 2))
        for enc, modes in requests.items():
            enc %= len(encodings)
            logits = self.ctc[enc]._forward(encodings[enc])
            for dl in disable_logits:
                logits[:, :, dl] = -float('inf')
            if 'greedy' in modes:
                result[f'greedy{enc}'] = self._greedy(logits, masks)
        return result

    def _greedy(self, ctc_probs, encoder_mask) -> Tuple[List[List[int]], Any]:
        """CTC greedy decoding.

        NOTE(review): uses ``self.eos`` which this class never defines --
        will raise AttributeError if called.
        """
        batch_size = ctc_probs.shape[0]
        # Let's assume B = batch_size
        maxlen = encoder_mask.size(2)  # T
        encoder_out_lens = encoder_mask.squeeze(1).sum(1)
        topk_prob, topk_index = ctc_probs.topk(1, dim=2)  # (B, T, 1)
        topk_index = topk_index.view(batch_size, maxlen)  # (B, T)
        mask = make_pad_mask(encoder_out_lens, maxlen)  # (B, T)
        topk_index = topk_index.masked_fill_(mask, self.eos)  # (B, T)
        hyps = [hyp.tolist() for hyp in topk_index]
        scores = topk_prob.max(1)
        hyps = [remove_duplicates_and_blank(hyp) for hyp in hyps]
        return hyps, scores



class SpliceProjector(torch.nn.Module):
    """Splice ``group_size`` consecutive frames and project them to ``odim``.

    Reduces the time resolution by a factor of ``group_size``; a ragged
    tail is handled by replicating the last frame before splicing.
    """

    def __init__(self, idim: int, odim: int, group_size=2):
        super().__init__()
        self.group_size = group_size
        self.linear = torch.nn.Linear(idim * group_size, odim)

    def forward(self, xs, lengths):
        batch, time, feat = xs.size()
        # Round the frame count up to a multiple of group_size.
        num_groups = (time + self.group_size - 1) // self.group_size
        tail = num_groups * self.group_size - time
        if tail > 0:
            # Replicate the final frame so splicing divides evenly.
            xs = torch.nn.functional.pad(xs, (0, 0, 0, tail), mode='replicate')
        # (B, T', D) -> (B, T'/g, g*D) -> (B, T'/g, odim)
        spliced = xs.reshape(batch, num_groups, self.group_size * feat)
        projected = self.linear(spliced)
        # Ceil-divide the valid lengths to match the new time axis.
        new_lengths = (lengths + self.group_size - 1) // self.group_size
        return projected, new_lengths
        

def init_model(configs):
    """Build the supervised ``ASRModel`` from a parsed config dict.

    Expected keys: ``input_dim``, ``output_dim``, ``embedder_conf``,
    ``encoder_conf`` (list of lists of per-encoder confs), ``model_conf``;
    optional: ``common_conf``, ``encoder_common_conf``, ``projector`` or
    ``projector_conf``.
    """
    from wenet.models.moe_comformer.ctc import CTC_New
    from wenet.models.moe_comformer.embedder import init_embedder
    from wenet.models.moe_comformer.pure_encoder_v2r import ConformerEncoder
    from wenet.utils.merge_dicts import merge_dicts
    # 2-D conv front-end; performs 6x temporal downsampling.
    embedder = init_embedder(configs['input_dim'], **configs['embedder_conf'])
    dim = embedder.pos_enc.d_model
    odim = configs['output_dim']
    use_positional = configs['embedder_conf']['pos_enc_layer_type'] != 'no_pos'
    if configs['model_conf'].get('shared_ctc', False):
        # Two list entries referencing ONE shared CTC module (weight tying).
        ctc_list = [CTC_New(dim, odim)] * 2
    else:
        # BUG FIX: a stray trailing comma here previously wrapped the list
        # in a 1-tuple, which broke ASRModel's `len(ctc) == 2` assertion.
        ctc_list = [CTC_New(dim, odim) for _ in range(2)]
    common_conf = configs.get('common_conf', {})
    encoder_common_conf = configs.get('encoder_common_conf', {})
    if configs.get('projector', None):
        # A pre-built projector module takes precedence.
        projector = configs['projector']
    elif configs.get('projector_conf', None):
        projector = SpliceProjector(**configs['projector_conf'])
    else:
        projector = None
    return ASRModel(
        vocab_size=odim,
        embedder=embedder,
        encoders=[[ConformerEncoder(
                       dim, use_positional_embedding=use_positional,
                       **merge_dicts(common_conf, encoder_common_conf, c),
                   ) for c in conf] for conf in configs['encoder_conf']],
        ctc=ctc_list,
        projector=projector,
        **configs['model_conf']
    )
# from wenet.models.moe_comformer.bestrq import BestRQModel
from wenet.models.moe_comformer.best_rq_xlgeng import BestRQ
from wenet.models.moe_comformer.best_rq_ctc_ft import BestRQ_CTC_FT
def init_model_ssl(configs):
    """Build an SSL (BEST-RQ style) model from a parsed config dict.

    ``configs['model']`` selects the variant:
      * ``moe_ssl`` (default)  -> BestRQ pretraining model
      * ``moe_ssl_ctc_fn``     -> CTC fine-tuning model
      * ``moe_ssl_ctc_fn_FSQ`` -> CTC fine-tuning model with FSQ encoder

    Raises:
        ValueError: if ``configs['model']`` is not one of the above.
    """
    from wenet.models.moe_comformer.embedder import init_embedder
    from wenet.models.moe_comformer.pure_encoder_v2r import ConformerEncoder
    from wenet.utils.merge_dicts import merge_dicts
    # 2-D conv front-end; performs 6x temporal downsampling.
    embedder = init_embedder(configs['input_dim'], **configs['embedder_conf'])
    dim = embedder.pos_enc.d_model
    odim = configs['output_dim']
    use_positional = configs['embedder_conf']['pos_enc_layer_type'] != 'no_pos'
    common_conf = configs.get('common_conf', {})
    encoder_common_conf = configs.get('encoder_common_conf', {})

    model_type = configs.get("model", "moe_ssl")
    # model_type -> (encoder class, training/fine-tuning wrapper).
    # Previously each branch duplicated the identical encoder construction.
    registry = {
        "moe_ssl": (MoeComformer_ssl_encoder, BestRQ),
        "moe_ssl_ctc_fn": (MoeComformer_ssl_encoder, BestRQ_CTC_FT),
        "moe_ssl_ctc_fn_FSQ": (MoeComformer_ssl_encoder_with_FSQ,
                               BestRQ_CTC_FT_withFSQ),
    }
    if model_type not in registry:
        raise ValueError(f"{model_type} is not supported.")
    # Preserve the original per-branch log messages verbatim.
    if model_type == "moe_ssl":
        print(f'model type is {model_type}，default model')
    else:
        print(f'model type is {model_type},得到的结果是CTC 模型')
    encoder_cls, wrapper_cls = registry[model_type]
    ssl_encoder = encoder_cls(
        embedder=embedder,
        encoders=[[ConformerEncoder(
            dim, use_positional_embedding=use_positional,
            **merge_dicts(common_conf, encoder_common_conf, c),
        ) for c in conf] for conf in configs['encoder_conf']],
        **configs['model_conf']
    )
    return wrapper_cls(ssl_encoder)

def init_moe_comformer(configs):
    """Build and return the bare ``MoeComformer_ssl_encoder`` (no SSL
    training wrapper) from a parsed config dict."""
    from wenet.models.moe_comformer.embedder import init_embedder
    from wenet.models.moe_comformer.pure_encoder_v2r import ConformerEncoder
    from wenet.utils.merge_dicts import merge_dicts
    # 2-D conv front-end; performs 6x temporal downsampling.
    embedder = init_embedder(configs['input_dim'], **configs['embedder_conf'])
    dim = embedder.pos_enc.d_model
    # Note: the unused local `odim = configs['output_dim']` was removed;
    # the encoder itself does not need the output vocabulary size.
    use_positional = configs['embedder_conf']['pos_enc_layer_type'] != 'no_pos'
    common_conf = configs.get('common_conf', {})
    encoder_common_conf = configs.get('encoder_common_conf', {})
    ssl_encoder = MoeComformer_ssl_encoder(
        embedder=embedder,
        encoders=[[ConformerEncoder(
            dim, use_positional_embedding=use_positional,
            **merge_dicts(common_conf, encoder_common_conf, c),
        ) for c in conf] for conf in configs['encoder_conf']],
        **configs['model_conf']
    )
    return ssl_encoder


def init_moe_comformer_with_FSQ(configs):
    """Build and return the bare ``MoeComformer_ssl_encoder_with_FSQ``
    (FSQ-quantized variant, no SSL training wrapper) from a config dict."""
    from wenet.models.moe_comformer.embedder import init_embedder
    from wenet.models.moe_comformer.pure_encoder_v2r import ConformerEncoder
    from wenet.utils.merge_dicts import merge_dicts
    # 2-D conv front-end; performs 6x temporal downsampling.
    embedder = init_embedder(configs['input_dim'], **configs['embedder_conf'])
    dim = embedder.pos_enc.d_model
    # Note: the unused local `odim = configs['output_dim']` was removed;
    # the encoder itself does not need the output vocabulary size.
    use_positional = configs['embedder_conf']['pos_enc_layer_type'] != 'no_pos'
    common_conf = configs.get('common_conf', {})
    encoder_common_conf = configs.get('encoder_common_conf', {})
    ssl_encoder = MoeComformer_ssl_encoder_with_FSQ(
        embedder=embedder,
        encoders=[[ConformerEncoder(
            dim, use_positional_embedding=use_positional,
            **merge_dicts(common_conf, encoder_common_conf, c),
        ) for c in conf] for conf in configs['encoder_conf']],
        **configs['model_conf']
    )
    return ssl_encoder


def init_model_ssl_2(configs):
    """Build a BEST-RQ pretraining wrapper from the third-party
    ``best_rq_pytorch`` package.

    ``configs`` is accepted for interface parity but unused: every
    hyper-parameter below is hard-coded.
    """
    from best_rq_pytorch.best_rq import BestRQ, BestRQPretrainWrapper
    from best_rq_pytorch.conformer import ConformerWrapper

    conformer_conf = dict(
        dim=1024,
        depth=24,
        heads=16,
        conv_kernel_size=5,
        ff_mult=4,
        attn_dropout=0.1,
        ff_dropout=0.1,
        conv_dropout=0.1,
        attn_flash=False,
    )
    backbone = ConformerWrapper(num_tokens=1024, conformer=conformer_conf)
    brq = BestRQ(
        codebook_size=1024,
        codebook_dim=16,
        sample_rate=16_000,
        n_mels=80,
        win_length=400,   # 25 ms window at 16 kHz
        hop_length=160,   # 10 ms hop at 16 kHz
        conformer=backbone,
    )
    # Mask 90% of frames during pretraining.
    return BestRQPretrainWrapper(model=brq, mask_prob=0.90)


def merge_ckpt(main_ckpt: str, proj_ckpt: str, out_ckpt: str):
    """Fold a projector-only checkpoint into a main model checkpoint.

    Keys in ``proj_ckpt`` are stored as ``'1.<param>'``; each is renamed
    to ``'projector.linear.<param>'`` and merged into the main state
    dict, which is then written to ``out_ckpt``.
    """
    main_state = torch.load(main_ckpt, map_location='cpu')
    # Some checkpoints nest the weights under 'model' (next to optim state).
    main_state = main_state.get('model', main_state)
    for key, value in torch.load(proj_ckpt, map_location='cpu').items():
        # originally '1.xxx' -> 'projector.linear.xxx'
        main_state['projector.linear.' + key[2:]] = value
    torch.save(main_state, out_ckpt)


def main_test_fsq():
    """Smoke-test the FSQ quantizer: forward pass, commitment loss, and
    the index -> code round trip. Prints results for manual inspection."""
    import torch
    from vector_quantize_pytorch import FSQ

    # FSQ with eight levels of 3 -> 3^8 = 6561 distinct codes.
    fsq = FSQ(
        levels=[3, 3, 3, 3, 3, 3, 3, 3],
        dim=128,
        preserve_symmetry=True,
    )

    # Dummy input of shape (batch_size, sequence_length, feature_dim).
    x = torch.randn(2, 10, 128)

    # Pick the GPU when one is available.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Using device: {device}")

    # Move module and data onto the chosen device.
    fsq.to(device)
    x = x.to(device)

    # Forward pass. (Fixed: a redundant CPU forward pass previously ran
    # before the .to(device) calls and its result was discarded.)
    quantized, indices = fsq(x)

    print("quantized shape", quantized.shape)  # same shape as input: (2, 10, 128)
    print("quantized", quantized)

    print("indices shape", indices.shape)  # (2, 10): one code index per position
    print(indices)
    # Commitment loss, as typically used in a VQ-VAE total loss.
    commitment_loss = torch.nn.functional.mse_loss(quantized, x.detach())
    print(f"Commitment loss: {commitment_loss.item()}")
    # Map the discrete indices back to their quantized code vectors.
    codes = fsq.indices_to_codes(indices)

    print("输入的索引 (indices):")
    print(indices)
    print("\n索引对应的具体量化代码 (8维向量，每个维度对应一个整数值):")
    print(codes.shape)
    print(codes)

import torch
from vector_quantize_pytorch import FSQ

def main_test_fsq_train():
    """Overfit an FSQ quantizer on one fixed random batch to sanity-check
    that gradients flow and the commitment loss decreases."""
    # Initialize the FSQ module (eight levels of 3, 128-dim codes).
    fsq = FSQ(
        levels=[3, 3, 3, 3, 3, 3, 3, 3],
        dim=128,
        preserve_symmetry=False,
    )

    # Dummy input of shape (batch_size, sequence_length, feature_dim).
    x = torch.randn(2, 10, 128)

    # Pick the GPU when one is available.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Using device: {device}")

    # Move module and data onto the chosen device.
    fsq.to(device)
    x = x.to(device)
    print(f'x : {x}')

    # Optimizer that updates the FSQ parameters.
    optimizer = torch.optim.Adam(fsq.parameters(), lr=3e-2)

    # Run 10000 training steps on the same fixed batch.
    for step in range(10000):
        # Forward pass.
        quantized, indices = fsq(x)
        # Commitment loss, as typically used in a VQ-VAE total loss.
        commitment_loss = torch.nn.functional.mse_loss(quantized, x.detach())
        # Backward pass.
        optimizer.zero_grad()
        commitment_loss.backward()
        # Parameter update.
        optimizer.step()

        # Log the loss every 1000 steps.
        if step % 1000 == 0:
            print(f"Step {step}, Commitment loss: {commitment_loss.item()}")

    print("Training finished!")
    quantized, indices = fsq(x)
    print(f'quantized : {quantized}')



if __name__ == '__main__':
    """"""
    # Ad-hoc manual test entry point; the individual test calls below are
    # currently disabled (commented out), so only the timing is printed.
    from gxl_ai_utils.utils import utils_file
    now = utils_file.do_get_now_time_by_second()
    # main_test_fsq()
    # main_test_fsq_train()
    # encoder = init_moe_comformer_with_FSQ()
    print(f"Test completed ，cost time {utils_file.do_get_elapsed_time(now)}s")

