import logging
from collections import namedtuple
from functools import partial
from typing import List, Optional, Union, Tuple

import torch

# from megatron.core import InferenceParams
from megatron.core import InferenceParams, parallel_state, tensor_parallel
from megatron.core.config_logger import has_config_logger_enabled, log_config_to_disk
from megatron.core.models.gpt import GPTModel
from megatron.core.transformer.enums import ModelType
# from megatron.core.models.speech.whisper_model import WhisperMegatron
# from megatron.core.models.vision.multimodal_projector import MultimodalProjector
from megatron.core.transformer.spec_utils import ModuleSpec, build_module
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.training.initialize import initialize_megatron
from megatron.training.training import setup_model_and_optimizer
from megatron.core.transformer.mlp import MLP, MLPSubmodules
from megatron.core.models.speech.whisper_layer_specs import get_whisper_layer_with_transformer_engine_spec
import torch.nn as nn
from megatron.core.transformer.custom_layers.transformer_engine import (
    TEColumnParallelLinear,
    TEDotProductAttention,
    TELayerNormColumnParallelLinear,
    TENorm,
    TERowParallelLinear,
)
from megatron.core.models.gpt.gpt_layer_specs import (
    get_gpt_layer_local_spec,
    get_gpt_layer_with_transformer_engine_spec,
)
import pdb
from megatron.core.models.common.speech_module.speech_module import SpeechModule
from torchaudio.models import Conformer
from wenet.transformer.asr_encoder import init_model
import yaml
from wenet.utils.checkpoint import load_checkpoint as wenet_load_checkpoint


class PretrainedMoEConformer(SpeechModule):
    """Wraps a pretrained wenet MoE-Conformer encoder as a Megatron SpeechModule."""

    def __init__(
        self,
        yaml_path,
        pretrained_ckp_path,
        config=None
    ):
        super().__init__(config=config)
        # Build the wenet encoder from its YAML config, then restore the
        # pretrained weights before handing it to the wrapper.
        with open(yaml_path, 'r') as fin:
            encoder_cfg = yaml.load(fin, Loader=yaml.FullLoader)
        encoder = init_model(encoder_cfg)
        wenet_load_checkpoint(encoder, pretrained_ckp_path)
        self.model = encoder

    def forward(
        self,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        cv: bool = False,
    ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor],
               Optional[torch.Tensor]]:
        """Run the wrapped wenet encoder front-end.

        Args:
            speech: (Batch, Length, ...) input features.
            speech_lengths: (Batch,) valid length of each sample.
            cv: cross-validation flag forwarded to the wenet encoder.

        Returns:
            (encodings, embeddings, masks) exactly as produced by the wenet
            encoder's `_forward_encoder`.
        """
        # Delegate directly; this wrapper adds no computation of its own.
        return self.model._forward_encoder(speech, speech_lengths, cv=cv)


class MoEConformerLLM(GPTModel):
    """MoEConformerLLM multi-modal model using Whisper for audio input."""

    def __init__(
        self,
        language_transformer_config: TransformerConfig,
        language_transformer_layer_spec: ModuleSpec,
        language_vocab_size: int,
        language_max_sequence_length: int,
        drop_speech_class_token: bool,
        allow_missing_speech_projection_checkpoint: bool = False,
        parallel_output: bool = True,
        language_position_embedding_type: str = 'learned_absolute',
        language_rotary_percent: float = 1.0,
        pre_process: bool = True,
        post_process: bool = True,
        language_rotary_base: int = 10000,
        share_embeddings_and_output_weights: bool = False,
        conformer_yaml_path: str = "",
        conformer_pretrained_ckp_path: str = ""
    ) -> None:
        """Build the GPT language model plus the conformer speech front-end.

        Args:
            language_transformer_config: TransformerConfig for the GPT decoder.
            language_transformer_layer_spec: layer spec for the GPT decoder.
            language_vocab_size: padded vocabulary size of the language model.
            language_max_sequence_length: maximum sequence length (positional
                embeddings are sized from this).
            drop_speech_class_token: whether to drop the speech class token.
            allow_missing_speech_projection_checkpoint: tolerate missing
                speech-projection weights when loading a checkpoint.
            parallel_output: keep logits tensor-parallel (no gather).
            language_position_embedding_type: positional embedding scheme.
            language_rotary_percent: fraction of dims using rotary embedding.
            pre_process: True on the first pipeline stage; the speech encoder
                and projection only exist there.
            post_process: True on the last pipeline stage.
            language_rotary_base: rotary embedding base frequency.
            share_embeddings_and_output_weights: tie input/output embeddings.
            conformer_yaml_path: wenet YAML config for the pretrained encoder.
            conformer_pretrained_ckp_path: checkpoint path for the encoder.
        """
        super().__init__(
            config=language_transformer_config,
            transformer_layer_spec=language_transformer_layer_spec,
            vocab_size=language_vocab_size,
            max_sequence_length=language_max_sequence_length,
            parallel_output=parallel_output,
            position_embedding_type=language_position_embedding_type,
            rotary_percent=language_rotary_percent,
            pre_process=pre_process,
            post_process=post_process,
            rotary_base=language_rotary_base,
            share_embeddings_and_output_weights=share_embeddings_and_output_weights
        )
        if has_config_logger_enabled(language_transformer_config):
            log_config_to_disk(language_transformer_config, locals(), prefix=type(self).__name__)

        logging.getLogger(__name__).warning(
            "MoEConformerLLM model is under active development. It may be missing features and its methods may change."
        )
        if pre_process:
            # Speech encoder + projection live on the first pipeline stage only.
            self.speech_model = PretrainedMoEConformer(
                yaml_path=conformer_yaml_path,
                pretrained_ckp_path=conformer_pretrained_ckp_path,
                config=language_transformer_config
            )

            # The in_features of the CTC head's input projection gives the
            # encoder embedding width — assumes the wenet model exposes
            # ctc[0].lo; TODO confirm against the encoder YAML.
            self.speech_embedding_dim = self.speech_model.model.ctc[0].lo.in_features
            self.llm_hidden = self.config.hidden_size

            # Map (intermediate) speech model outputs to the language model input dimension.
            self.speech_projection = nn.Linear(
                self.speech_embedding_dim,
                self.llm_hidden
            )

            self._drop_speech_class_token = drop_speech_class_token
            # Special token ids used when splicing audio embeddings into text
            # (Qwen2-Audio style vocabulary).
            self.audio_token_index = 151665
            self.ignore_index = -100
            self.pad_token_id = 151643
        else:
            self.speech_model = None
            self.speech_projection = None

        # This allows ignoring missing weights for the speech projection during checkpoint loading.
        # Fix: also require speech_projection to exist — on non-first pipeline
        # stages it is None and the original code raised AttributeError here.
        if allow_missing_speech_projection_checkpoint and self.speech_projection is not None:
            speech_projection_param_names = [
                f"speech_projection.{name}"
                for name in self.speech_projection.state_dict().keys()
            ]
            self.speech_projection.register_load_state_dict_post_hook(
                partial(_load_state_dict_hook_ignore_param_names, speech_projection_param_names)
            )

    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
        """Compute per-sample lengths after the conformer's front-end subsampling.

        Args:
            input_lengths: (Batch,) lengths of the raw feature sequences.

        Returns:
            Tuple of (feature lengths, encoder output lengths); the encoder
            applies no further reduction, so both are the same tensor.
        """
        # The convolutional subsampling reduces the frame count by a factor
        # of 6: new_len = (len - 5) // 6.
        subsampled = (input_lengths - 5) // 6
        return subsampled, subsampled

    def _merge_input_ids_with_audio_features(
        self, audio_features, num_audio_tokens, inputs_embeds, input_ids, attention_mask, labels
    ):
        """
        Merge input_ids with audio features into final embeddings

        Args:
            audio_features (`torch.Tensor` of shape `(num_audios, max_audio_tokens, embed_dim)`):
                All audio vectors of all audios in the batch
            num_audio_tokens (`torch.LongTensor` of shape `(num_audios)`):
                The length of audio embeddings of each audio as stacked in `audio_features`
            inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, embed_dim)`):
                Token embeddings before merging with audio embeddings
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Input_ids of tokens, possibly filled with audio token
            attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Mask to avoid performing attention on padding token indices.
            labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*)
                labels need to be recalculated to support training (if provided)
        Returns:
            final_embedding, final_attention_mask, final_labels, position_ids, final_input_ids

        Explanation:
            each audio has variable length embeddings, with length specified by num_audio_tokens
            audio_features is concatenation of all audio embed vectors
            task: fill each <|AUDIO|> with the correct number of audio embeddings
            Example:
                X (5 tokens), Y (3 tokens), Z (8 tokens)
                X, Y are in the same sequence (in-context learning)
            if right padding
                input_ids: [
                    a b c d e f X g h i j k Y l m
                    o p q r Z s t u v _ _ _ _ _ _
                ]
                input_ids should be: [
                    a b c d e f X X X X X g h i j k Y Y Y l m
                    o p q r Z Z Z Z Z Z Z Z s t u v _ _ _ _ _
                ]
                labels should be: [
                    a b c d e f _ _ _ _ _ g h i j k _ _ _ l m
                    o p q r _ _ _ _ _ _ _ _ s t u v _ _ _ _ _
                ]
            elif left padding
                input_ids: [
                    a b c d e f X g h i j k Y l m
                    _ _ _ _ _ _ o p q r Z s t u v
                ]
                input_ids should be: [
                    a b c d e f X X X X X g h i j k Y Y Y l m
                    _ _ _ _ _ o p q r Z Z Z Z Z Z Z Z s t u v
                ]
                labels should be: [
                    a b c d e f _ _ _ _ _ g h i j k _ _ _ l m
                    _ _ _ _ _ o p q r _ _ _ _ _ _ _ _ s t u v
                ]
            Edge cases:
                * If tokens are same but audio token sizes are different, then cannot infer left or right padding
                ```python
                url1 = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3"
                audio1, _ = librosa.load(BytesIO(urlopen(url1).read()), sr=processor.feature_extractor.sampling_rate)
                url2 = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/f2641_0_throatclearing.wav"
                audio2, _ = librosa.load(BytesIO(urlopen(url2).read()), sr=processor.feature_extractor.sampling_rate)
                prompts = [
                    "[INST] <|AUDIO|>\nWhat is that in this audio? [/INST]",
                    "[INST] <|AUDIO|>\nWhat is that in this audio? [/INST]",
                ]
                inputs = processor(text=prompts, audios=[audio1, audio2], return_tensors='pt', padding=True).to("cuda")
                    audio1 has 101 tokens, while audio2 has 72 tokens
                ```

                input_ids: [
                    a b c d X g h
                    i j Y k l m n
                ]
                where X is 3 tokens while Y is 5, this mean after merge
                if left-padding (batched generation)
                    input_ids should be: [
                        _ _ a b c d X X X g h
                        i j Y Y Y Y Y k l m n
                    ]
                elif (right padding) (training)
                    input_ids should be: [
                        a b c d X X X g h _ _
                        i j Y Y Y Y Y k l m n
                    ]
        """
        num_audios, max_audio_tokens, embed_dim = audio_features.shape
        # Valid (non-padded) frames per audio clip: position < num_audio_tokens.
        audio_features_mask = torch.arange(max_audio_tokens).expand(num_audios, max_audio_tokens).to(
            num_audio_tokens.device
        ) < num_audio_tokens.unsqueeze(1)
        # Flatten every valid audio frame into one (total_audio_tokens, embed_dim) tensor.
        masked_audio_features = audio_features[audio_features_mask].view(-1, embed_dim)
        batch_size, sequence_length = input_ids.shape
        # Infer the padding side from where zeros appear in the attention mask.
        _left_padding = torch.any(attention_mask[:, 0] == 0)
        _right_padding = torch.any(attention_mask[:, -1] == 0)

        left_padding = True
        if batch_size > 1:
            if _left_padding and not _right_padding:
                left_padding = True
            elif not _left_padding and _right_padding:
                left_padding = False
            elif not _left_padding and not _right_padding:
                # both side is 1, so cannot tell
                #left_padding = self.padding_side == "left"
                left_padding = False
            else:
                # invalid attention_mask
                raise ValueError(f"both side of attention_mask has zero, invalid. {attention_mask}")

        # 1. Create a mask to know where special audio tokens are
        special_audio_token_mask = input_ids == self.audio_token_index
        num_special_audio_tokens = torch.sum(special_audio_token_mask, dim=-1)

        # In case the Audio model or the Language model has been offloaded to CPU, we need to manually
        # set the corresponding tensors into their correct target device.
        target_device = inputs_embeds.device
        attention_mask = attention_mask.to(target_device)
        input_ids = input_ids.to(target_device)
        num_audio_tokens = num_audio_tokens.to(target_device)
        batch_indices, non_audio_indices = torch.where(
            (input_ids != self.audio_token_index) & (attention_mask == 1)
        )

        # 2. Compute the positions where text should be written
        # Calculate new positions for text tokens in merged audio-text sequence.
        # `special_audio_token_mask` identifies audio tokens. Each audio token will be replaced by `audio_feat_lengths - 1` text tokens.
        # `torch.cumsum` computes how each audio token shifts subsequent text token positions.
        token_placeholder_num = torch.zeros_like(input_ids)
        token_placeholder_num[special_audio_token_mask] = num_audio_tokens.long() - 1
        token_placeholder_num = token_placeholder_num + 1
        new_token_positions = torch.cumsum(token_placeholder_num, -1) - 1
        max_token_num = token_placeholder_num.sum(-1).max()
        # Per-row shortfall against the longest merged row; used to shift
        # positions right when sequences are left-padded.
        nb_audio_pad = max_token_num - 1 - new_token_positions[:, -1]
        if left_padding:
            new_token_positions += nb_audio_pad[:, None]  # offset for left padding
        text_to_overwrite = new_token_positions[batch_indices, non_audio_indices]
        batch_indices, non_audio_indices, text_to_overwrite = (
            batch_indices.to(target_device),
            non_audio_indices.to(target_device),
            text_to_overwrite.to(target_device),
        )

        # 3. Create the full embedding, already padded to the maximum position
        final_embedding = torch.zeros(
            batch_size, max_token_num, embed_dim, dtype=inputs_embeds.dtype, device=inputs_embeds.device
        )
        final_attention_mask = torch.zeros(
            batch_size, max_token_num, dtype=attention_mask.dtype, device=inputs_embeds.device
        )
        final_input_ids = torch.full(
            (batch_size, max_token_num), self.pad_token_id, dtype=input_ids.dtype, device=inputs_embeds.device
        )

        # 4. Fill the embeddings based on the mask. If we have ["hey" "<audio>", "how", "are"]
        # we need to index copy on [0, 577, 578, 579] for the text and [1:576] for the audio features
        final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[batch_indices, non_audio_indices]

        final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[batch_indices, non_audio_indices]
        final_input_ids[batch_indices, text_to_overwrite] = input_ids[batch_indices, non_audio_indices]
        final_labels = None
        if labels is not None:
            labels = labels.to(target_device)
            # NOTE(review): full_like inherits attention_mask's dtype before the
            # long cast — assumes attention_mask is an integral mask here; confirm.
            final_labels = torch.full_like(final_attention_mask, self.ignore_index).to(torch.long)
            final_labels[batch_indices, text_to_overwrite] = labels[batch_indices, non_audio_indices]

        # 5. Fill the embeddings corresponding to the audios. Anything that is still zeros needs filling
        audio_to_overwrite = torch.full(
            (batch_size, max_token_num), True, dtype=torch.bool, device=inputs_embeds.device
        )
        audio_to_overwrite[batch_indices, text_to_overwrite] = False
        seq_indices = torch.arange(max_token_num).unsqueeze(0).to(target_device)
        seq_indices = seq_indices.expand(batch_size, max_token_num)

        if left_padding:
            # exclude padding on the left
            max_token_num = max_token_num.to(target_device)
            val = (max_token_num - seq_indices) <= (
                token_placeholder_num.sum(-1) - (attention_mask == 0).long().sum(-1)
            )[:, None]
        else:
            # exclude padding on the right
            val = seq_indices < (token_placeholder_num.sum(-1) - (attention_mask == 0).long().sum(-1))[:, None]

        audio_to_overwrite &= val

        # Sanity check: the number of slots to fill must match the total number
        # of valid audio frames, otherwise indexing below would silently misalign.
        if audio_to_overwrite.sum() != num_audio_tokens.sum():
            # rank = torch.distributed.get_rank()
            # print(f"Rank: {rank}, audio_to_overwrite: {audio_to_overwrite}, num_audio_tokens: {num_audio_tokens}, left_padding: {left_padding}, ")
            # if num_audio_tokens[1] < 0:
            #     print(f"labels all: {labels}")
            #     print(f"labels: {labels[1,:]}")
            #     print(f"audio_features: {audio_features[1]}, {audio_features.shape}")
            #     print(f"feats_lengths: {num_audio_tokens}")

            raise ValueError(
                f"The input provided to the model are wrong. The number of audio tokens is {num_special_audio_tokens} while"
                f" the number of audio given to the model is {num_audios}. This prevents correct indexing and breaks batch generation."
            )

        final_embedding[audio_to_overwrite] = (
            masked_audio_features.contiguous().reshape(-1, embed_dim).to(target_device)
        )
        final_attention_mask |= audio_to_overwrite
        # Position ids follow the merged mask; padded positions are pinned to 1.
        position_ids = (final_attention_mask.cumsum(-1) - 1).masked_fill_((final_attention_mask == 0), 1)

        # since input text is padded to seq_len, all inputs should be truncated to seq_len
        final_embedding = final_embedding[:,:sequence_length,:]
        final_attention_mask = final_attention_mask[:, :sequence_length]
        if final_labels is not None:
            final_labels = final_labels[:,:sequence_length]
        position_ids = position_ids[:, :sequence_length]
        final_input_ids = final_input_ids[:, :sequence_length]
        return final_embedding, final_attention_mask, final_labels, position_ids, final_input_ids
    
    def freeze(self, freeze_language_model: bool, freeze_whisper: bool, freeze_projection=False):
        """Disable gradients for the selected sub-modules.

        Args:
            freeze_language_model: freeze the embedding, decoder and output
                layer (whichever exist on this pipeline stage).
            freeze_whisper: currently a no-op; kept for interface compatibility
                (the speech-model freezing code is disabled below).
            freeze_projection: freeze the speech projection layer.
        """
        modules = []
        # if freeze_whisper and self.speech_model is not None:
        #     modules.append(self.speech_model)
        if freeze_projection and self.speech_projection is not None:
            modules.append(self.speech_projection)
        if freeze_language_model:
            # Fix: the original called modules.append(a, b, c), which raises
            # TypeError (list.append takes one argument). Collect each existing
            # sub-module individually instead; pipeline stages may lack some.
            for attr_name in ("embedding", "decoder", "output_layer"):
                sub_module = getattr(self, attr_name, None)
                if sub_module is not None:
                    modules.append(sub_module)

        for module in modules:
            for param in module.parameters():
                # Fix: the attribute is `requires_grad`; the original assigned
                # the non-existent `require_grad`, silently freezing nothing.
                param.requires_grad = False
        
    def forward(
        self,
        input_features: torch.Tensor,
        input_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        # feature_attention_mask: Optional[torch.Tensor] = None,
        feature_lengths: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        loss_mask: Optional[torch.Tensor] = None,
        inference_params: Optional[InferenceParams] = None,
        decoder_input: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Forward function of the MoEConformerLLM model.

        Args:
            input_features (torch.Tensor): input audio features — assumes
                [batch, input_dim, seq_len] layout; TODO confirm vs dataloader.
            input_ids (torch.Tensor): input text ids [batch, text_seq_len].
            position_ids (torch.Tensor): input text position ids [batch, text_seq_len].
            feature_lengths (torch.Tensor): valid lengths of input_features [batch].
            attention_mask (torch.Tensor): Attention mask for the language model [batch, 1, combined_seq_len, combined_seq_len].
            labels (torch.Tensor): Optional target text labels [batch, combined_seq_len].
            loss_mask (torch.Tensor): Text loss mask [batch, text_seq_len]; accepted but not used in this method.
            inference_params (InferenceParams): Inference-time parameters including KV cache.
            decoder_input (torch.Tensor): Pre-computed decoder input. If provided, skips embedding computation.

        Returns:
            output (torch.Tensor): Loss of shape [b, s] if labels are provided, otherwise logits of shape [b, s, vocab_size].
        """

        if decoder_input is not None:
            pass
        elif self.pre_process:
            # 1. Extract the input embeddings
            # (S, B, D)
            # if input_ids:
            inputs_embeds = self.embedding(input_ids=input_ids, position_ids=position_ids)
 
            # 2. Merge text and audios
            if input_features is not None: # and input_ids.shape[1] != 1:

                # Run the conformer front-end; encodings[1] is used as the
                # audio feature stream fed to the projection below.
                encodings, embeddings, masks = self.speech_model(input_features, feature_lengths)
                audio_feat_lengths, audio_output_lengths = self._get_feat_extract_output_lengths(
                    feature_lengths
                )
                audio_features = encodings[1]
                # logits = self.speech_model.model.ctc[1]._forward(encodings[1])
                # idxs = torch.argmax(logits[0], dim=-1).tolist()
                # idxs = [x for x in idxs if x != 0]
                # dic = {}
                # with open("resource_moe_conformer_250418/tokens_10.15.txt", 'r') as f:
                #     for line in f:
                #         token, id = line.strip().split(" ", 1)
                #         dic[int(id)] = token
                # result = "".join([dic[x] for x in idxs])
                # print(result)

                # batch_size, _, max_mel_seq_len = input_features.shape
                # max_seq_len = (max_mel_seq_len - 2) // 2 + 1
                # # Create a sequence tensor of shape (batch_size, max_seq_len)
                # seq_range = (
                #     torch.arange(0, max_seq_len, dtype=audio_feat_lengths.dtype, device=audio_feat_lengths.device)
                #     .unsqueeze(0)
                #     .expand(batch_size, max_seq_len)
                # )

                # lengths_expand = audio_feat_lengths.unsqueeze(1).expand(batch_size, max_seq_len)
                # # Create mask
                # padding_mask = seq_range >= lengths_expand

                # audio_attention_mask_ = padding_mask.view(batch_size, 1, 1, max_seq_len)

                # audio_attention_mask = audio_attention_mask_.to(
                #     dtype=torch.bool, device=self.speech_model.conv1.weight.device
                # )

                # audio_outputs = self.speech_model(input_features, attention_mask=audio_attention_mask)
                # selected_audio_feature = audio_outputs

                # Project audio features to the language model hidden size.
                audio_features = self.speech_projection(audio_features)
                # pdb.set_trace()
                # audio_features
                # [S, B, D] -> [B, S, D]: the merge helper expects batch-first.
                inputs_embeds = inputs_embeds.permute(1,0,2)
                # labels is processed in dataloader
                inputs_embeds, attention_mask, _, position_ids, final_input_ids = self._merge_input_ids_with_audio_features(
                    audio_features, audio_output_lengths, inputs_embeds, input_ids, attention_mask, labels
                )
                 
                attention_mask_batch, attention_mask_seq = tuple(attention_mask.shape)
                # construct tril mask (causal mask over the merged sequence)
                attention_mask = torch.tril(torch.ones(
                    (attention_mask_batch, attention_mask_seq, attention_mask_seq), device=attention_mask.device)).view(
                    attention_mask_batch, 1, attention_mask_seq, attention_mask_seq)        

                # pdb.set_trace()
                # roll back to [S, B, D]
                inputs_embeds = inputs_embeds.permute(1,0,2)
                                                                 
            decoder_input =  inputs_embeds
        else:
            decoder_input = None # pipeline parallelism: later stages receive activations
        # maybe we need to inject get_data_on_this_cp_rank here
        # from transformers import AutoTokenizer
        # tokenizer = AutoTokenizer.from_pretrained("/teaspeech_ceph/share_976139/users/adrenzhou/nlp_workdir/Megatron-LM/Qwen-audio-whisper-tiny-qwen-0.5B", trust_remote_code=True)
        # Use the superclass forward to handle the rest of the model processing

        output = super().forward(
            input_ids=None,
            position_ids=None,
            attention_mask=attention_mask,
            decoder_input=decoder_input,
            labels=labels,
            inference_params=inference_params,
            extra_block_kwargs=None,
        )
        output = output.contiguous()
        return output


def _load_state_dict_hook_ignore_param_names(
    param_names: List[str], module: torch.nn.Module, incompatible_keys: namedtuple
):
    """Hook to ignore missing keys during checkpoint loading.

    By default, this should not be used to avoid accidentally missing weights in checkpoint loading.

    Example use case: Use this for the speech projection if you want to load a checkpoint that contains speech and language model weights
    but not the speech projection weights.

    Args:
        param_names (list of str): Parameter names allowed to be missing when calling load_state_dict.
        module (torch.nn.Module): The torch module this hook applies to. Unused here but required by the torch API.
        incompatible_keys (namedtuple): Namedtuple with fields missing_keys and unexpected_keys, which collect the missing and unexpected
            keys when calling load_state_dict on this torch module, respectively.
    """
    for param_name in param_names:
        if param_name in incompatible_keys.missing_keys:
            logging.getLogger(__name__).warning(
                f"{param_name} being removed from incompatible_keys.missing_keys in MoEConformerLLM"
            )
            incompatible_keys.missing_keys.remove(param_name)


def get_mlp_module_spec(use_te: bool = True) -> ModuleSpec:
    """Return a dense MLP module spec, with or without Transformer Engine.

    Args:
        use_te: use Transformer Engine parallel linear layers when True,
            plain Megatron-core tensor-parallel linears otherwise.

    Returns:
        ModuleSpec for an MLP with the requested linear implementations.
    """
    # Fix: `use_te` was previously ignored and TE layers were always used,
    # contradicting the "w/ or w/o TE" contract.
    if use_te:
        linear_fc1, linear_fc2 = TEColumnParallelLinear, TERowParallelLinear
    else:
        linear_fc1 = tensor_parallel.ColumnParallelLinear
        linear_fc2 = tensor_parallel.RowParallelLinear
    return ModuleSpec(
        module=MLP,
        submodules=MLPSubmodules(
            linear_fc1=linear_fc1,
            linear_fc2=linear_fc2
        ),
    )

# Debugging code
# Debugging code
def model_provider(
    pre_process=True, post_process=True, add_encoder=True, add_decoder=True,
    parallel_output=True) -> MoEConformerLLM:
    """Build a MoEConformerLLM from the global Megatron args (debug helper).

    Args:
        pre_process: include the embedding / speech front-end pipeline stage.
        post_process: include the output-layer pipeline stage.
        add_encoder, add_decoder, parallel_output: accepted for interface
            compatibility with Megatron model providers; unused here.

    Returns:
        A MoEConformerLLM wired to the hard-coded conformer config/checkpoint.
    """
    from megatron.training import get_args
    from megatron.training.arguments import core_transformer_config_from_args

    args = get_args()
    decoder_config = core_transformer_config_from_args(args)
    gpt_layer_spec = get_gpt_layer_with_transformer_engine_spec(
        args.num_experts, args.moe_grouped_gemm, args.qk_layernorm
    )
    # NOTE(review): the speech encoder/projection configs previously built here
    # (proj_config, transformer_config, whisper_layer_spec, proj_layer_spec)
    # were never used — the conformer is configured from its own YAML below —
    # so that dead code has been removed.
    model = MoEConformerLLM(
        language_transformer_config=decoder_config,
        language_transformer_layer_spec=gpt_layer_spec,
        language_vocab_size=args.padded_vocab_size,
        language_max_sequence_length=args.max_position_embeddings,
        drop_speech_class_token=False,
        language_position_embedding_type=args.position_embedding_type,
        pre_process=pre_process,
        post_process=post_process,
        conformer_yaml_path="/apdcephfs_qy3/share_976139/users/adrenzhou/audio_llm/zhen-encoder/conf/encoder_10.15.yaml",
        conformer_pretrained_ckp_path="/apdcephfs_qy3/share_976139/users/anhaoxing/share/zhen_model/10.15/encoder.pt"
    )
    return model


def debug_audio_llm():
    """Standalone debug driver: build the model, stream batches from a
    Kaldi-fbank dataset and run forward passes (no optimizer / backward)."""
    from megatron.training.checkpointing import load_checkpoint
    from megatron.training.checkpointing import save_checkpoint
    from megatron.core.datasets.kaldi_fbank_audio_dataset import KaldiFbankAudioDataset, collate_fn
    from megatron.legacy.data.data_samplers import MegatronPretrainingRandomSampler


    initialize_megatron()

    # model, optimizer, opt_param_scheduler = setup_model_and_optimizer(model_provider, ModelType.encoder_or_decoder
    # Build per-stage models under pipeline parallelism: only the first stage
    # gets the speech front-end (pre_process), only the last the output layer.
    if parallel_state.get_pipeline_model_parallel_world_size() > 1:
        if parallel_state.get_pipeline_model_parallel_rank() == 0: # for 2 gpus
            model = model_provider(pre_process=True, post_process=False)
        else:
            model = model_provider(pre_process=False, post_process=True)
    else:
        model = model_provider()
    # _, _ = load_checkpoint([model], None, None,strict=False)
    # load_checkpoint(model, optimizer, opt_param_scheduler, load_arg='load', strict=True,
    #                 ft_client=None)
    # Create random input tensor
    model = model.cuda().bfloat16()
    model.eval()
    device = torch.cuda.current_device()
    dtype = torch.bfloat16


    ann_paths = [
        # "audio_data/train_20250324_llmasr.jsonl",
        "audio_data/train_20231206_slidespeech_487h.jsonl",
        # "audio_data/train_20250417_cn_slides_generated_context_all_upper.jsonl",
        # "audio_data/train_20250410_cn_slides_draft_8kh_open_ocr_all_upper.jsonl"
    ]
    # model_path = "/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/Megatron-LM/Qwen-audio-whisper-tiny-qwen-0.5B"
    model_path = "/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/pretrained_models/Qwen/Qwen2-Audio-7B"

    dataset = KaldiFbankAudioDataset(ann_paths, model_path, ret_id=True, data_aug=True, sample_merge=False, read_penguins=False)

    # Configure the DataLoader.
    batch_size = 1
    num_workers = 0
    from torch.utils.data import DataLoader

    from tqdm import tqdm

    batch_sampler = MegatronPretrainingRandomSampler(
        dataset=dataset,
        total_samples=len(dataset),
        consumed_samples=0,
        micro_batch_size=batch_size,
        data_parallel_rank=0,
        data_parallel_size=1,
        data_sharding=False,
    )    

    train_ds = DataLoader(
        dataset=dataset,
        batch_sampler=batch_sampler,
        num_workers=0,
        pin_memory=True,
        collate_fn=collate_fn,
        # prefetch_factor=1,
        # NOTE(review): this conditional is dead — always False.
        persistent_workers=True if False else False,
    )
    import gc
    # pdb.set_trace()
    for epoch in range(1,3):
        for batch_idx, batch in tqdm(enumerate(train_ds)):   
            # pdb.set_trace()
            # Move every tensor to the GPU; features need the model's bf16 dtype.
            new_batch = {item:batch[item].to(device) for item in batch}
            new_batch['input_features'] = new_batch['input_features'].bfloat16()
            new_batch['attention_mask'] = new_batch['attention_mask'].bool()
            # new_batch['feature_attention_mask']
            print(batch_idx)
            output  = model(**new_batch)




        # output = model(input_features=feats, feature_lengths=feats_lengths)

# Run the debug driver when this module is executed as a script.
if __name__ == "__main__":
    debug_audio_llm()
