import logging
import pdb
import sys
from collections import namedtuple
from functools import partial
from typing import List, Optional, Union

import torch
from transformers import AutoModelForPreTraining

# from megatron.core import InferenceParams
from megatron.core import InferenceParams, parallel_state, tensor_parallel
from megatron.core.config_logger import has_config_logger_enabled, log_config_to_disk
from megatron.core.models.gpt import GPTModel
from megatron.core.models.gpt.gpt_layer_specs import (
    get_gpt_layer_local_spec,
    get_gpt_layer_with_transformer_engine_spec,
)
from megatron.core.models.speech.whisper_layer_specs import (
    get_whisper_layer_with_transformer_engine_spec,
)
from megatron.core.models.speech.whisper_model import WhisperMegatron
from megatron.core.models.vision.multimodal_projector import MultimodalProjector
from megatron.core.transformer.custom_layers.transformer_engine import (
    TEColumnParallelLinear,
    TEDotProductAttention,
    TELayerNormColumnParallelLinear,
    TENorm,
    TERowParallelLinear,
)
from megatron.core.transformer.enums import ModelType
from megatron.core.transformer.mlp import MLP, MLPSubmodules
from megatron.core.transformer.spec_utils import ModuleSpec, build_module
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.training.initialize import initialize_megatron
from megatron.training.training import setup_model_and_optimizer

# sys.path.append("/apdcephfs/share_976139/users/hongfeixue/workspace/MLC-SLM-Baseline")
# from wenet.transformer.search import DecodeResult, ctc_greedy_search

# Mapping from supported MLC-SLM language names to integer language IDs.
# Used as LID classification targets and for indexing the per-language prompt embeddings.
MLC_SLM_dict = {"english":0, "french":1, "german":2, "italian":3, "japanese":4, "korean":5, "portuguese":6, "russian":7, "spanish":8, "thai":9, "vietnamese":10}


# Module that learns per-language fusion weights.
class WeightLearner(torch.nn.Module):
    """Learns one mixing weight per language for fusing the two speech encoders.

    ``forward`` returns the raw parameters squashed through a sigmoid so each
    weight lies in (0, 1).
    """

    def __init__(self, num_languages: int) -> None:
        super().__init__()
        # Initialize every language's logit to 0.5 (sigmoid(0.5) ~= 0.62) so the
        # initial fusion is close to balanced.
        self.weights = torch.nn.Parameter(torch.full((num_languages,), 0.5))

    def forward(self) -> torch.Tensor:
        """Return the per-language weights normalized into the (0, 1) range."""
        return torch.sigmoid(self.weights)
    
class DownSampleProjector(torch.nn.Module):
    """Temporal downsampling projector.

    Trims the input so its length divides evenly by ``downsample_rate``, runs a
    1-D conv stack whose strided second conv downsamples time by that rate while
    widening channels by the same factor, then projects the widened features to
    ``odim`` with a two-layer MLP and a final LayerNorm.
    """

    def __init__(self, downsample_rate: int, idim: int, odim: int):
        super().__init__()
        self.ds_rate = downsample_rate
        self.idim = idim
        self.odim = odim
        # Same-length conv + GELU, then a strided conv that folds ds_rate frames
        # into the channel dimension.
        self.conv = torch.nn.Sequential(
            torch.nn.Conv1d(idim, idim, kernel_size=3, padding=1),
            torch.nn.GELU(),
            torch.nn.Conv1d(idim, idim * self.ds_rate, kernel_size=3, stride=self.ds_rate, padding=1),
        )
        # MLP mapping the widened channels back to idim and out to odim.
        self.linear_connector = torch.nn.Sequential(
            torch.nn.Linear(self.idim * self.ds_rate, self.idim),
            torch.nn.ReLU(),
            torch.nn.Linear(self.idim, self.odim),
            torch.nn.ReLU(),
        )
        self.layernorm = torch.nn.LayerNorm(self.odim)

    def forward(self, x: torch.Tensor):
        """Map (B, T, idim) -> (B, ~T/ds_rate, odim).

        Trailing frames that do not fill a complete ``ds_rate`` group are dropped.
        """
        usable = (x.size(1) // self.ds_rate) * self.ds_rate
        feats = x[:, :usable, :].transpose(1, 2)   # (B, idim, T')
        feats = self.conv(feats).transpose(1, 2)   # (B, T'/rate, idim*rate)
        return self.layernorm(self.linear_connector(feats))


class IdealLLM(GPTModel):
    """IdealLLM multi-modal model using Whisper for audio input."""

    def __init__(
        self,
        language_transformer_config: TransformerConfig,
        language_transformer_layer_spec: ModuleSpec,
        language_vocab_size: int,
        language_max_sequence_length: int,
        speech_transformer_config: TransformerConfig,
        speech_transformer_layer_spec: ModuleSpec,
        drop_speech_class_token: bool,
        speech_projection_config: TransformerConfig,
        speech_projection_layer_spec: ModuleSpec,
        speech_projection_type: str = "gather_affine",
        allow_missing_speech_projection_checkpoint: bool = False,
        parallel_output: bool = True,
        language_position_embedding_type: str = 'learned_absolute',
        language_rotary_percent: float = 1.0,
        pre_process: bool = True,
        post_process: bool = True,
        seq_length: int = 1500,
        input_dim: int = 128,
        conv1_out_dim: int = 1280,
        conv_kernel_size: int = 3,
        conv_stride: int = 1,
        language_rotary_base: int = 10000,
        share_embeddings_and_output_weights: bool = False,
    ) -> None:
        """Build the GPT language backbone and, on pre-process ranks, the speech stack.

        The speech stack consists of: a Whisper-style encoder (``speech_model``),
        a second pretrained SSL encoder loaded from a local checkpoint
        (``speech_model2``), per-encoder transformer adapters, two cascaded
        downsample projectors into the LLM embedding space, learnable
        per-language prompt embeddings, and auxiliary LID and CTC heads.

        NOTE(review): several arguments (``speech_projection_config``,
        ``speech_projection_layer_spec``, ``speech_projection_type``,
        ``drop_speech_class_token`` beyond storing it) are currently unused by
        the active code path — presumably kept for interface compatibility.
        """
        # The language model itself is the GPTModel base class.
        super().__init__(
            config=language_transformer_config,
            transformer_layer_spec=language_transformer_layer_spec,
            vocab_size=language_vocab_size,
            max_sequence_length=language_max_sequence_length,
            parallel_output=parallel_output,
            position_embedding_type=language_position_embedding_type,
            rotary_percent=language_rotary_percent,
            pre_process=pre_process,
            post_process=post_process,
            rotary_base=language_rotary_base,
            share_embeddings_and_output_weights=share_embeddings_and_output_weights
        )
        if has_config_logger_enabled(language_transformer_config):
            log_config_to_disk(language_transformer_config, locals(), prefix=type(self).__name__)

        logging.getLogger(__name__).warning(
            "IdealLLM model is under active development. It may be missing features and its methods may change."
        )
        # Only the first pipeline stage owns the speech encoders/projectors.
        if pre_process:
            self.speech_model = WhisperMegatron(
                transformer_config=speech_transformer_config,
                transformer_layer_spec=speech_transformer_layer_spec,
                input_dim=input_dim,
                conv1_out_dim=conv1_out_dim,
                conv_kernel_size=conv_kernel_size,
                conv_stride=conv_stride,
                seq_length=seq_length,
                pre_process=True,
                post_process=False,
            )
            self._drop_speech_class_token = drop_speech_class_token
            # Map (intermediate) speech model outputs to the language model input dimension.
            # NOTE(review): these ids look like Qwen special-token ids — confirm against the tokenizer.
            self.audio_token_index = 151646
            self.ignore_index = -100
            self.pad_token_id = 151643
            # Two cascaded 2x projectors => 4x total temporal downsampling; the second
            # projects into the LLM hidden size (read off the decoder's final layernorm).
            self.downsample_projector1 = DownSampleProjector(downsample_rate=2, idim=1280, odim=2560)
            self.downsample_projector2 = DownSampleProjector(downsample_rate=2, idim=2560, odim=self.decoder.final_layernorm.weight.shape[0])

            # Whisper front-end constants for log-mel feature extraction.
            import whisper
            from whisper.audio import HOP_LENGTH, N_FFT, N_SAMPLES
            self.n_fft = N_FFT
            self.win_length = N_FFT
            self.hop_length = HOP_LENGTH
            self.n_mels = 128
            
            self.mel_filters = whisper.audio.mel_filters
            transformer_layer = torch.nn.TransformerEncoderLayer(
                d_model=1280,
                nhead=8,
                dim_feedforward=2560,
                dropout=0.1,
                batch_first=True
            )
            # Build two Transformer encoders of two layers each (TransformerEncoder
            # deep-copies the layer, so the adapters do not share parameters).
            self.speech_transformer_adapter1 = torch.nn.TransformerEncoder(transformer_layer, num_layers=2)
            self.speech_transformer_adapter2 = torch.nn.TransformerEncoder(transformer_layer, num_layers=2)
            # Project the SSL encoder's 1024-dim output into the Whisper 1280-dim space.
            self.ssl2whisper = torch.nn.Linear(1024, 1280)
            # NOTE(review): hard-coded absolute checkpoint path — environment specific.
            self.speech_model2 =  AutoModelForPreTraining.from_pretrained("/apdcephfs/share_976139/users/hongfeixue/model/mms-1b", return_dict=True)
            # self.speech_model2 = S3prlFrontend(
            #     frontend_conf = {"upstream": "wav2vec2_local", 
            #                      "upstream_ckpt": "/apdcephfs/share_976139/users/hongfeixue/model/mms_1b.converted.pt"},
            #     download_dir = "./hub",
            #     multilayer_feature = True,
            # )
            # Auxiliary language-identification head + per-language fusion weights.
            self.language_class = torch.nn.Linear(1280, len(MLC_SLM_dict.keys()))
            self.language_loss = torch.nn.CrossEntropyLoss()
            self.weight_network = WeightLearner(len(MLC_SLM_dict.keys()))

            self.pad_id = 151643 # or zero? <|endoftext|>, zero: !
            # self.sep_id = [151644, 77091, 198] # <im_start>assistant\n
            self.sep_id = [151668, 271, 151669, 271] # <im_start>assistant\n\n<think>\n\n</think>\n\n qwen3
            self.audio_id = 151644
            self.end_of_turn = 151645 # eos
            # NOTE(review): duplicate of the assignment above (same value) — looks redundant.
            self.audio_token_index = 151646

            # Learnable prompt embeddings: num_prompts_per_lang rows per language.
            self.num_prompts_per_lang = 5
            self.trainable_prompts = torch.nn.Embedding(len(MLC_SLM_dict) * self.num_prompts_per_lang, self.decoder.final_layernorm.weight.shape[0])
            # Auxiliary CTC head over (a prefix of) the LLM vocabulary; blank id 151669.
            self.ctc_linear = torch.nn.Linear(1280, 151670) #152065)
            self.ctc_loss = torch.nn.CTCLoss(blank=151669, reduction='sum', zero_infinity=True)
            from transformers import AutoTokenizer
            # NOTE(review): hard-coded absolute tokenizer path — environment specific.
            self.tokenizer = AutoTokenizer.from_pretrained("/apdcephfs/share_976139/users/hongfeixue/model/IdealLLM-qwen3-nolora")
        else:
            self.speech_model = None
            self.speech_projection = None

        # This allows ignoring missing weights for the speech projection during checkpoint loading.
        # if allow_missing_speech_projection_checkpoint:
        #     speech_projection_param_names = [
        #         f"speech_projection.{name}"
        #         for name in self.speech_projection.state_dict().keys()
        #     ]
        #     self.speech_projection.register_load_state_dict_post_hook(
        #         partial(_load_state_dict_hook_ignore_param_names, speech_projection_param_names)
        #     )

    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
        """Return (conv front-end output lengths, audio encoder output lengths).

        Mirrors the downsampling schedule of the audio front-end: one stride-2
        stage followed by two further stride-2 reductions.
        """
        conv_lengths = (input_lengths - 1) // 2 + 1
        pooled_lengths = (conv_lengths - 2) // 2 + 1
        encoder_lengths = (pooled_lengths - 2) // 2 + 1
        return conv_lengths, encoder_lengths

    def _merge_input_ids_with_audio_features(
        self, audio_features, num_audio_tokens, inputs_embeds, input_ids, attention_mask, labels
    ):
        """
        Merge input_ids with audio features into final embeddings

        Args:
            audio_features (`torch.Tensor` of shape `(num_audios, max_audio_tokens, embed_dim)`):
                All audio vectors of all audios in the batch
            num_audio_tokens (`torch.LongTensor` of shape `(num_audios)`):
                The length of audio embeddings of each audio as stacked in `audio_features`
            inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, embed_dim)`):
                Token embeddings before merging with audio embeddings
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Input_ids of tokens, possibly filled with audio token
            attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Mask to avoid performing attention on padding token indices.
            labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*)
                labels need to be recalculated to support training (if provided)
        Returns:
            final_embedding, final_attention_mask, final_labels, position_ids, final_input_ids

        Explanation:
            each audio has variable length embeddings, with length specified by num_audio_tokens
            audio_features is concatenation of all audio embed vectors
            task: fill each <|AUDIO|> with the correct number of audio embeddings
            Example:
                X (5 tokens), Y (3 tokens), Z (8 tokens)
                X, Y are in the same sequence (in-context learning)
            if right padding
                input_ids: [
                    a b c d e f X g h i j k Y l m
                    o p q r Z s t u v _ _ _ _ _ _
                ]
                input_ids should be: [
                    a b c d e f X X X X X g h i j k Y Y Y l m
                    o p q r Z Z Z Z Z Z Z Z s t u v _ _ _ _ _
                ]
                labels should be: [
                    a b c d e f _ _ _ _ _ g h i j k _ _ _ l m
                    o p q r _ _ _ _ _ _ _ _ s t u v _ _ _ _ _
                ]
            elif left padding
                input_ids: [
                    a b c d e f X g h i j k Y l m
                    _ _ _ _ _ _ o p q r Z s t u v
                ]
                input_ids should be: [
                    a b c d e f X X X X X g h i j k Y Y Y l m
                    _ _ _ _ _ o p q r Z Z Z Z Z Z Z Z s t u v
                ]
                labels should be: [
                    a b c d e f _ _ _ _ _ g h i j k _ _ _ l m
                    _ _ _ _ _ o p q r _ _ _ _ _ _ _ _ s t u v
                ]
            Edge cases:
                * If tokens are same but audio token sizes are different, then cannot infer left or right padding
                ```python
                url1 = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3"
                audio1, _ = librosa.load(BytesIO(urlopen(url1).read()), sr=processor.feature_extractor.sampling_rate)
                url2 = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/f2641_0_throatclearing.wav"
                audio2, _ = librosa.load(BytesIO(urlopen(url2).read()), sr=processor.feature_extractor.sampling_rate)
                prompts = [
                    "[INST] <|AUDIO|>\nWhat is that in this audio? [/INST]",
                    "[INST] <|AUDIO|>\nWhat is that in this audio? [/INST]",
                ]
                inputs = processor(text=prompts, audios=[audio1, audio2], return_tensors='pt', padding=True).to("cuda")
                    audio1 has 101 tokens, while audio2 has 72 tokens
                ```

                input_ids: [
                    a b c d X g h
                    i j Y k l m n
                ]
                where X is 3 tokens while Y is 5, this mean after merge
                if left-padding (batched generation)
                    input_ids should be: [
                        _ _ a b c d X X X g h
                        i j Y Y Y Y Y k l m n
                    ]
                elif (right padding) (training)
                    input_ids should be: [
                        a b c d X X X g h _ _
                        i j Y Y Y Y Y k l m n
                    ]
        """
        num_audios, max_audio_tokens, embed_dim = audio_features.shape
        # Validity mask over the padded audio_features tensor: True for real frames.
        audio_features_mask = torch.arange(max_audio_tokens).expand(num_audios, max_audio_tokens).to(
            num_audio_tokens.device
        ) < num_audio_tokens.unsqueeze(1)
        # Flatten all valid audio frames across the batch into one (sum_lengths, embed_dim) tensor.
        masked_audio_features = audio_features[audio_features_mask].view(-1, embed_dim)
        batch_size, sequence_length = input_ids.shape
        # Infer the padding side from the attention-mask edges.
        _left_padding = torch.any(attention_mask[:, 0] == 0)
        _right_padding = torch.any(attention_mask[:, -1] == 0)

        left_padding = True
        if batch_size > 1:
            if _left_padding and not _right_padding:
                left_padding = True
            elif not _left_padding and _right_padding:
                left_padding = False
            elif not _left_padding and not _right_padding:
                # both side is 1, so cannot tell
                #left_padding = self.padding_side == "left"
                left_padding = False
            else:
                # invalid attention_mask
                raise ValueError(f"both side of attention_mask has zero, invalid. {attention_mask}")

        # 1. Create a mask to know where special audio tokens are
        special_audio_token_mask = input_ids == self.audio_token_index
        num_special_audio_tokens = torch.sum(special_audio_token_mask, dim=-1)

        # In case the Audio model or the Language model has been offloaded to CPU, we need to manually
        # set the corresponding tensors into their correct target device.
        target_device = inputs_embeds.device
        attention_mask = attention_mask.to(target_device)
        input_ids = input_ids.to(target_device)
        num_audio_tokens = num_audio_tokens.to(target_device)
        batch_indices, non_audio_indices = torch.where(
            (input_ids != self.audio_token_index) & (attention_mask == 1)
        )

        # 2. Compute the positions where text should be written
        # Calculate new positions for text tokens in merged audio-text sequence.
        # `special_audio_token_mask` identifies audio tokens. Each audio token will be replaced by `audio_feat_lengths - 1` text tokens.
        # `torch.cumsum` computes how each audio token shifts subsequent text token positions.
        token_placeholder_num = torch.zeros_like(input_ids)
        token_placeholder_num[special_audio_token_mask] = num_audio_tokens.long() - 1
        token_placeholder_num = token_placeholder_num + 1
        new_token_positions = torch.cumsum(token_placeholder_num, -1) - 1
        max_token_num = token_placeholder_num.sum(-1).max()
        # Number of trailing pad slots each row needs in the merged sequence.
        nb_audio_pad = max_token_num - 1 - new_token_positions[:, -1]
        if left_padding:
            new_token_positions += nb_audio_pad[:, None]  # offset for left padding
        text_to_overwrite = new_token_positions[batch_indices, non_audio_indices]
        batch_indices, non_audio_indices, text_to_overwrite = (
            batch_indices.to(target_device),
            non_audio_indices.to(target_device),
            text_to_overwrite.to(target_device),
        )

        # 3. Create the full embedding, already padded to the maximum position
        final_embedding = torch.zeros(
            batch_size, max_token_num, embed_dim, dtype=inputs_embeds.dtype, device=inputs_embeds.device
        )
        final_attention_mask = torch.zeros(
            batch_size, max_token_num, dtype=attention_mask.dtype, device=inputs_embeds.device
        )
        final_input_ids = torch.full(
            (batch_size, max_token_num), self.pad_token_id, dtype=input_ids.dtype, device=inputs_embeds.device
        )

        # 4. Fill the embeddings based on the mask. If we have ["hey" "<audio>", "how", "are"]
        # we need to index copy on [0, 577, 578, 579] for the text and [1:576] for the audio features
        final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[batch_indices, non_audio_indices]

        final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[batch_indices, non_audio_indices]
        final_input_ids[batch_indices, text_to_overwrite] = input_ids[batch_indices, non_audio_indices]
        final_labels = None
        if labels is not None:
            labels = labels.to(target_device)
            # Audio positions keep ignore_index so they do not contribute to the loss.
            final_labels = torch.full_like(final_attention_mask, self.ignore_index).to(torch.long)
            final_labels[batch_indices, text_to_overwrite] = labels[batch_indices, non_audio_indices]

        # 5. Fill the embeddings corresponding to the audios. Anything that is still zeros needs filling
        audio_to_overwrite = torch.full(
            (batch_size, max_token_num), True, dtype=torch.bool, device=inputs_embeds.device
        )
        audio_to_overwrite[batch_indices, text_to_overwrite] = False
        seq_indices = torch.arange(max_token_num).unsqueeze(0).to(target_device)
        seq_indices = seq_indices.expand(batch_size, max_token_num)

        if left_padding:
            # exclude padding on the left
            max_token_num = max_token_num.to(target_device)
            val = (max_token_num - seq_indices) <= (
                token_placeholder_num.sum(-1) - (attention_mask == 0).long().sum(-1)
            )[:, None]
        else:
            # exclude padding on the right
            val = seq_indices < (token_placeholder_num.sum(-1) - (attention_mask == 0).long().sum(-1))[:, None]

        audio_to_overwrite &= val
        # Sanity check: the number of free slots must equal the number of audio frames to insert.
        if audio_to_overwrite.sum() != num_audio_tokens.sum():
            
            raise ValueError(
                f"The input provided to the model are wrong. The number of audio tokens is {num_special_audio_tokens} while"
                f" the number of audio given to the model is {num_audios}. This prevents correct indexing and breaks batch generation."
            )

        final_embedding[audio_to_overwrite] = (
            masked_audio_features.contiguous().reshape(-1, embed_dim).to(target_device)
        )
        final_attention_mask |= audio_to_overwrite
        # Position ids count only attended positions; masked positions are pinned to 1.
        position_ids = (final_attention_mask.cumsum(-1) - 1).masked_fill_((final_attention_mask == 0), 1)

        # print(f"final_embedding: {final_embedding.shape}")
        # print(f"final_attention_mask: {final_attention_mask.shape}")
        # print(f"final_labels: {final_labels.shape}")
        # print(f"sequence_length: {sequence_length}")
        # torch.set_printoptions(threshold=float('inf'))


        # since input text is padded to seq_len, all inputs should be truncated to seq_len
        final_embedding = final_embedding[:,:sequence_length,:]
        final_attention_mask = final_attention_mask[:, :sequence_length]
        if final_labels is not None:
            final_labels = final_labels[:,:sequence_length]
        position_ids = position_ids[:, :sequence_length]
        final_input_ids = final_input_ids[:, :sequence_length]
        # if parallel_state.get_tensor_model_parallel_rank() == 0:
        #     print("after: ", final_input_ids)
        return final_embedding, final_attention_mask, final_labels, position_ids, final_input_ids
    
    def freeze(self, freeze_language_model: bool, freeze_whisper: bool, freeze_projection=False):
        """Selectively set ``requires_grad`` on parameter groups and print a summary.

        NOTE(review): ``freeze_projection`` and ``freeze_whisper`` are
        unconditionally overwritten to ``False`` below, so the caller-supplied
        values for those two arguments are ignored — confirm this is an
        intentional (debugging) override before relying on them.
        """
        # Print trainable parameters
        freeze_projection = False
        freeze_whisper = False
        for k, p in self.named_parameters():
            # "link" modules include the downsample blocks, the transformer adapter
            # blocks, and the linear blocks before/after them.
            if freeze_language_model and k.startswith("embedding"):
                p.requires_grad = False
            # if freeze_whisper and "speech_model" in k:
            #     p.requires_grad = False
            # Whisper encoder parameters are always left trainable.
            if "speech_model" in k and 'encoder' in k:
                p.requires_grad = True
            if freeze_language_model and ("decoder" in k or "output_layer" in k):
                p.requires_grad = False
            if freeze_projection and ("weight_network" in k or "featurizer" in k or "speech_transformer_adapter1" in k or 'downsample_projector' in k or 'ctc_linear' in k or 'trainable_prompts' in k):
                p.requires_grad = False
            if freeze_projection and 'speech_transformer_adapter2' in k and 'layers.1' in k:
                p.requires_grad = False
            # Only rank 0 of the TP group prints, to avoid duplicated output.
            if parallel_state.get_tensor_model_parallel_rank() == 0:
                print(f"{k} {p.requires_grad}")
            

        """统计模型的总参数和可训练参数"""
        # (The bare string above is a no-op statement; it reads "count the model's
        # total and trainable parameters".)
        total_params = 0
        trainable_params = 0
        for p in self.parameters():
            total_params += p.numel()
            if p.requires_grad:
                trainable_params += p.numel()
        if parallel_state.get_tensor_model_parallel_rank() == 0:
            print(f'--------------------------------------------------------------------------------{freeze_language_model}, {freeze_whisper}, {freeze_projection}')
            print(f"总参数数量: {total_params:,} 可训练参数数量: {trainable_params:,} (占比: {trainable_params/total_params:.2%})")

    def log_mel_spectrogram(
        self,
        audio: torch.Tensor,
        ilens: torch.Tensor = None,
    ) -> torch.Tensor:
        """Compute Whisper-style log-mel features from raw waveforms.

        Returns ``(log_spec, olens)`` where ``olens`` is ``ilens // hop_length``
        (or ``None`` when ``ilens`` is not given).
        """
        win = torch.hann_window(self.win_length).to(audio.device)
        spec = torch.stft(
            audio, self.n_fft, self.hop_length, window=win, return_complex=True
        )

        # Whisper drops the final STFT frame by convention.
        power = spec[..., :-1].abs() ** 2

        mel = self.mel_filters(audio.device, self.n_mels) @ power
        log_spec = torch.clamp(mel, min=1e-10).log10()

        olens = None if ilens is None else ilens // self.hop_length

        # Dynamic-range compression: clip to within 8 dB-decades of the
        # per-utterance maximum, then rescale as Whisper does.
        per_utt_max = log_spec.view(audio.size(0), -1).max(dim=-1)[0]
        log_spec = torch.maximum(log_spec, per_utt_max[:, None, None] - 8.0)
        log_spec = (log_spec + 4.0) / 4.0

        return log_spec, olens

    def prompt_wrap(self, speech_embeds, language_id):
        """Prepend the per-language learnable prompt embeddings to the speech embeddings.

        Each language owns a contiguous block of ``num_prompts_per_lang`` rows in
        ``trainable_prompts``; the block for ``language_id`` is looked up and
        concatenated in front of ``speech_embeds`` along the time axis.
        """
        offsets = torch.arange(self.num_prompts_per_lang, device=speech_embeds.device)
        prompt_ids = language_id.unsqueeze(1) * self.num_prompts_per_lang + offsets  # (batch, num_prompts)
        prompt_embeds = self.trainable_prompts(prompt_ids)
        return torch.cat([prompt_embeds, speech_embeds], dim=1)
                
    def dual_encoder(self, input_features, raw_wav_lengths):
        """Encode raw waveforms with both speech encoders and fuse the results.

        Returns ``(speech_embeds, lid_embeds, speech_lens)`` — the fused frame
        embeddings, the LID classifier logits, and per-utterance frame lengths.
        """
        feats, feats_lens = self.log_mel_spectrogram(input_features, raw_wav_lengths)
        feats = feats.to(torch.bfloat16)
        max_len = feats.size(-1)  
        positions = torch.arange(max_len, device=feats_lens.device).expand(len(feats_lens), max_len)
        # Expand the length tensor to [batch_size, max_len].
        lengths_expanded = feats_lens.unsqueeze(1).expand(-1, max_len)
        # Build the mask matrix (positions within the length are 1, beyond it 0).
        feats_mask = (positions < lengths_expanded).float()
        # encoder_out, encoder_lens = self.speech_model(feats, feats_lens) # 2 times subsampling
        audio_feat_lengths, audio_output_lengths = self._get_feat_extract_output_lengths(
                feats_mask.sum(-1)
            )
        batch_size, _, max_mel_seq_len = feats.shape
        max_seq_len = (max_mel_seq_len - 2) // 2 + 1

        if max_mel_seq_len % 2 == 1:
            max_seq_len += 1
        # Create a sequence tensor of shape (batch_size, max_seq_len)
        seq_range = (
            torch.arange(0, max_seq_len, dtype=audio_feat_lengths.dtype, device=audio_feat_lengths.device)
            .unsqueeze(0)
            .expand(batch_size, max_seq_len)
        )
        
        lengths_expand = audio_feat_lengths.unsqueeze(1).expand(batch_size, max_seq_len)
        # Create mask (True marks padding positions).
        padding_mask = seq_range >= lengths_expand

        audio_attention_mask_ = padding_mask.view(batch_size, 1, 1, max_seq_len)

        audio_attention_mask = audio_attention_mask_.to(
            dtype=torch.bool, device=self.speech_model.conv1.weight.device
        )

        encoder_out = self.speech_model(feats, attention_mask=audio_attention_mask, pooling=False)
        # Transformer Encoder src mask
        # Squeeze the broadcast dims back to a per-batch (seq_len,) key-padding mask.
        speech_mask = audio_attention_mask.squeeze(1)
        speech_mask = speech_mask.squeeze(1)
        speech_embeds = self.speech_transformer_adapter1(encoder_out, src_key_padding_mask=speech_mask)
        speech_lens = (~speech_mask).sum(dim=-1)
        # Utterance-level embedding for LID (mean over frames, padding included).
        lid_embeds = torch.mean(speech_embeds, dim=1)
        
        # NOTE(review): the SSL encoder is moved to float32 on every call — confirm
        # this per-call cast is intentional (it is a no-op after the first call).
        self.speech_model2.to(torch.float32)
        max_seq = max(raw_wav_lengths)
        positions = torch.arange(max_seq, device=raw_wav_lengths.device).expand(len(raw_wav_lengths), max_seq)
        speech_mask2 = positions >= raw_wav_lengths.unsqueeze(1)
        encoder_out2 = self.speech_model2(input_features.to(torch.float32), speech_mask2).projected_states
        encoder_out2 = encoder_out2.to(torch.bfloat16)
        speech_embeds2 = self.ssl2whisper(encoder_out2)
        # NOTE(review): the wav-sample-level mask is replaced here by the Whisper
        # frame-level mask — presumably to align both encoders to the same frame
        # grid for the adapter below; confirm frame rates actually match.
        speech_mask2 = speech_mask
        # Length-align encoder 2's output to encoder 1's frame count (pad by
        # borrowing encoder-1 frames, or truncate).
        if speech_embeds.shape[1] > speech_embeds2.shape[1]:
            speech_embeds2 = torch.cat((speech_embeds2, speech_embeds[:, speech_embeds2.shape[1]:, :]), dim=1)            
        elif speech_embeds.shape[1] < speech_embeds2.shape[1]:
            speech_embeds2 = speech_embeds2[:, :speech_embeds.shape[1], :]

        # Transformer Encoder src mask
        # speech_lens2
        # max_seq = encoder_out2.size(1)
        # positions = torch.arange(max_seq, device=encoder_out2.device).expand(len(speech_lens2), max_seq)
        # speech_mask2 = positions >= speech_lens2.unsqueeze(1)
        speech_embeds2 = self.speech_transformer_adapter2(speech_embeds2, src_key_padding_mask=speech_mask2)
        lid_embeds2 = torch.mean(speech_embeds2, dim=1)
        
        # 1.1 LID/CTC fusion: combine the two utterance embeddings and classify language.
        lid_embeds = lid_embeds + lid_embeds2
        lid_embeds = self.language_class(lid_embeds)

        # Pick a per-utterance fusion weight from the learned per-language weights,
        # indexed by the predicted language.
        language_probs = torch.softmax(lid_embeds, dim=-1)  # (N, num_languages)
        _, predicted_lid = torch.max(language_probs, dim=1)  # (N,)
        current_weights = self.weight_network() * 2  # (N,)
        selected_weights = current_weights[predicted_lid]
        weight1 = 2 - selected_weights.unsqueeze(1)
        weight2 = selected_weights.unsqueeze(1)

        # Expand the weights to match the speech_embedding dimensions.
        B, T, C = speech_embeds.shape
        weight1 = weight1.unsqueeze(1)  # (B, 1, 1)
        weight1 = weight1.expand(B, T, C)  # (B, T, N)
        weight2 = weight2.unsqueeze(1)  # (B, 1, 1)
        weight2 = weight2.expand(B, T, C)  # (B, T, N)
        # Convex-style mix of the two encoders' frame embeddings (weights sum to 2).
        speech_embeds = weight1 * speech_embeds + weight2 * speech_embeds2
        return speech_embeds, lid_embeds, speech_lens

    def real_labels(self, labels, loss_mask):
        """Gather the label values selected by ``loss_mask`` and right-pad them.

        For every row, keeps only the positions where the mask is set, then pads
        each kept sequence with ``self.pad_id`` up to the longest kept length.

        Returns:
            (padded_labels, lengths): the stacked padded rows, and a tensor of
            each row's original (unpadded) kept length — both on ``labels.device``.
        """
        # Select the masked-in values per row.
        kept = [row[mask.bool()] for row, mask in zip(labels, loss_mask)]
        lengths = [v.numel() for v in kept]
        width = max(lengths)

        # Right-pad every row to the common width.
        padded = []
        for v in kept:
            deficit = width - v.numel()
            if deficit > 0:
                filler = torch.full((deficit,), self.pad_id, device=labels.device)
                v = torch.cat((v, filler))
            padded.append(v)

        return torch.stack(padded).to(labels.device), torch.tensor(lengths).to(labels.device)

    def forward(
        self,
        input_features: torch.Tensor,
        input_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        feature_attention_mask: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        raw_wav_lengths: Optional[torch.Tensor] = None,
        language_id: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        loss_mask: Optional[torch.Tensor] = None,
        inference_params: Optional[InferenceParams] = None,
        decoder_input: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Forward function of the IdealLLM model.

        Encodes audio with the dual encoder, projects/downsamples it, wraps it
        with language prompts, splices it into the text embedding sequence and
        runs the language-model decoder. The auxiliary CTC and language-ID
        losses are computed only when the audio branch actually runs; otherwise
        zero scalars are returned in their place.

        Args:
            input_features (torch.Tensor): input audio features
                (presumably [batch, input_dim, seq_len] — confirm with caller).
            input_ids (torch.Tensor): input text ids [batch, text_seq_len].
            position_ids (torch.Tensor): input text position ids [batch, text_seq_len].
            feature_attention_mask (torch.Tensor): audio feature mask (accepted but
                not used in this method).
            attention_mask (torch.Tensor): attention mask for the language model.
            raw_wav_lengths (torch.Tensor): lengths of the raw waveforms [batch].
            language_id (torch.Tensor): per-sample language index [batch].
            labels (torch.Tensor): optional target text labels [batch, combined_seq_len].
            loss_mask (torch.Tensor): text loss mask [batch, text_seq_len].
            inference_params (InferenceParams): inference-time parameters including KV cache.
            decoder_input (torch.Tensor): pre-computed decoder input. If provided,
                all embedding/audio pre-processing is skipped.

        Returns:
            loss_decoder (torch.Tensor): LM loss of shape [b, s] if labels are
                provided, otherwise logits of shape [b, s, vocab_size].
            loss_lid (torch.Tensor): language-ID loss (zero scalar when the audio
                branch was skipped).
            loss_ctc (torch.Tensor): sequence-length-normalized CTC loss (zero
                scalar when the audio branch was skipped).
        """
        # Auxiliary losses stay None unless the audio branch runs. This fixes a
        # NameError in the original code, which referenced ctc_proj/lid_embeds
        # unconditionally even when decoder_input was supplied, during
        # single-token incremental decoding, or on non-first pipeline stages.
        loss_ctc = None
        loss_lid = None

        if decoder_input is not None:
            # Caller supplied decoder input directly; skip all pre-processing.
            pass
        elif self.pre_process:
            # 1. Text embeddings, (S, B, D).
            inputs_embeds = self.embedding(input_ids=input_ids, position_ids=position_ids)

            # 2. Merge text and audio. Skipped for single-token incremental
            # decode steps (input_ids.shape[1] == 1).
            if input_features is not None and input_ids.shape[1] != 1:
                # 2a. Dual encoder -> speech embeddings, LID embeddings, frame lengths.
                speech_embeds, lid_embeds, speech_lens = self.dual_encoder(
                    input_features, raw_wav_lengths
                )

                # 2b. CTC head over the full-resolution encoder output.
                ctc_proj = self.ctc_linear(speech_embeds).transpose(0, 1).to(torch.float32)
                ctc_proj = ctc_proj.log_softmax(dim=-1)
                # NOTE(fix): CTC input_lengths must describe ctc_proj's time
                # axis, i.e. the lengths BEFORE downsampling / prompt wrapping.
                # The original code passed the mutated lengths, truncating the
                # CTC alignment to ~1/4 of the frames.
                ctc_input_lens = speech_lens

                # 2c. Projector: two stride-2 downsampling stages (4x total).
                speech_embeds = self.downsample_projector1(speech_embeds)
                speech_embeds = self.downsample_projector2(speech_embeds)
                speech_lens = (speech_lens - 2) // 2 + 1  # first downsample
                speech_lens = (speech_lens - 2) // 2 + 1  # second downsample

                # 2d. Wrap speech embeddings with per-language prompt embeddings.
                speech_embeds = self.prompt_wrap(speech_embeds, language_id)
                speech_lens = speech_lens + self.num_prompts_per_lang

                # 2e. Splice audio features into the text sequence in [B, S, D]
                # layout; labels are already aligned by the dataloader.
                inputs_embeds = inputs_embeds.permute(1, 0, 2)
                inputs_embeds, attention_mask, _, position_ids, _ = (
                    self._merge_input_ids_with_audio_features(
                        speech_embeds,
                        speech_lens,
                        inputs_embeds,
                        input_ids,
                        attention_mask,
                        labels,
                    )
                )
                # Roll back to (S, B, D) for the decoder.
                inputs_embeds = inputs_embeds.permute(1, 0, 2)

                # 2f. Auxiliary losses, only meaningful when audio was processed.
                ctc_labels, ctc_lens = self.real_labels(labels, loss_mask)
                loss_ctc = self.ctc_loss(ctc_proj, ctc_labels, ctc_input_lens, ctc_lens)
                # Normalize by combined sequence length S (== decoder input dim 0).
                loss_ctc = loss_ctc / inputs_embeds.size(0)
                loss_lid = self.language_loss(lid_embeds, language_id)

            decoder_input = inputs_embeds
        else:
            decoder_input = None  # pipeline parallelism: input comes from the previous stage

        output = super().forward(
            input_ids=None,
            position_ids=None,
            attention_mask=attention_mask,
            decoder_input=decoder_input,
            labels=labels,
            inference_params=inference_params,
            extra_block_kwargs=None,
        )
        loss_decoder = output.contiguous()

        if loss_ctc is None:
            # Audio branch skipped: return zero auxiliary losses with matching
            # device/dtype so the return signature stays stable for callers.
            loss_ctc = loss_decoder.new_zeros(())
            loss_lid = loss_decoder.new_zeros(())
        return loss_decoder, loss_lid, loss_ctc


def _load_state_dict_hook_ignore_param_names(
    param_names: List[str], module: torch.nn.Module, incompatible_keys: namedtuple
):
    """Hook to ignore missing keys during checkpoint loading.

    By default, this should not be used to avoid accidentally missing weights in checkpoint loading.

    Example use case: Use this for the speech projection if you want to load a checkpoint that contains speech and language model weights
    but not the speech projection weights.

    Args:
        param_names (list of str): Parameter names allowed to be missing when calling load_state_dict.
        module (torch.nn.Module): The torch module this hook applies to. Unused here but required by the torch API.
        incompatible_keys (namedtuple): Namedtuple with fields missing_keys and unexpected_keys, which collect the missing and unexpected
            keys when calling load_state_dict on this torch module, respectively.
    """
    for param_name in param_names:
        if param_name in incompatible_keys.missing_keys:
            logging.getLogger(__name__).warning(
                f"{param_name} being removed from incompatible_keys.missing_keys in IdealLLM"
            )
            incompatible_keys.missing_keys.remove(param_name)


def get_mlp_module_spec(use_te: bool = True) -> ModuleSpec:
    """Return a dense-MLP module spec, with or without Transformer Engine.

    Args:
        use_te: If True (default), build the MLP from Transformer Engine
            tensor-parallel linears. If False, fall back to megatron-core's
            local tensor-parallel linears. (Previously this flag was accepted
            but silently ignored, always returning the TE variant.)

    Returns:
        ModuleSpec: spec for a dense ``MLP`` with the selected linear layers.
    """
    if use_te:
        submodules = MLPSubmodules(
            linear_fc1=TEColumnParallelLinear,
            linear_fc2=TERowParallelLinear,
        )
    else:
        submodules = MLPSubmodules(
            linear_fc1=tensor_parallel.ColumnParallelLinear,
            linear_fc2=tensor_parallel.RowParallelLinear,
        )
    return ModuleSpec(module=MLP, submodules=submodules)

# Debugging code
def model_provider(
    pre_process=True, post_process=True, add_encoder=True, add_decoder=True,
    parallel_output=True) -> IdealLLM:
    """Build an IdealLLM for debugging: a Whisper-style speech encoder feeding a GPT decoder.

    Args:
        pre_process: Whether this (pipeline) stage owns the input embeddings.
        post_process: Whether this stage owns the output head.
        add_encoder / add_decoder / parallel_output: Accepted for provider-API
            compatibility; not consumed here.
    """
    from copy import deepcopy

    from megatron.training import get_args
    from megatron.training.arguments import core_transformer_config_from_args

    args = get_args()

    # Language-model (decoder) config and layer spec come straight from CLI args.
    decoder_config = core_transformer_config_from_args(args)
    gpt_layer_spec = get_gpt_layer_with_transformer_engine_spec(
        args.num_experts, args.moe_grouped_gemm, args.qk_layernorm
    )

    # Projection config mirrors the decoder config except for bias / output parallelism.
    proj_config = deepcopy(decoder_config)
    proj_config.add_bias_linear = True
    proj_config.parallel_output = False

    # Speech encoder config (Whisper-large-v3-sized: d=1280, 20 heads, 32 layers).
    speech_config = TransformerConfig(
        hidden_size=1280,
        num_attention_heads=20,
        num_layers=32,
        layernorm_epsilon=1e-5,
        add_qkv_bias=True,
        # apply_query_key_layer_scaling=True,
        tensor_model_parallel_size=args.tensor_model_parallel_size,
    )
    whisper_layer_spec = get_whisper_layer_with_transformer_engine_spec()

    return IdealLLM(
        language_transformer_config=decoder_config,
        language_transformer_layer_spec=gpt_layer_spec,
        language_vocab_size=args.padded_vocab_size,
        language_max_sequence_length=args.max_position_embeddings,
        speech_transformer_config=speech_config,
        speech_transformer_layer_spec=whisper_layer_spec,
        drop_speech_class_token=False,
        speech_projection_config=proj_config,
        speech_projection_layer_spec=get_mlp_module_spec().submodules,
        language_position_embedding_type=args.position_embedding_type,
        pre_process=pre_process,
        post_process=post_process,
    )


def debug_audio_llm():
    """Smoke-test the IdealLLM forward pass on a saved HF audio batch ('hf_audio.pt')."""
    from megatron.training.checkpointing import load_checkpoint, save_checkpoint

    initialize_megatron()

    # Build either the full model (no pipeline parallelism) or this stage's
    # half of a pipeline: stage 0 owns pre-processing, later stages own the head.
    if parallel_state.get_pipeline_model_parallel_world_size() > 1:
        is_first_stage = parallel_state.get_pipeline_model_parallel_rank() == 0
        model = model_provider(
            pre_process=is_first_stage, post_process=not is_first_stage
        )
    else:
        model = model_provider()

    model = model.cuda().bfloat16()
    model.eval()
    device = torch.cuda.current_device()

    # Saved batch exported from the HuggingFace reference implementation.
    dic = torch.load('hf_audio.pt')
    with torch.no_grad():
        audio_tensor = dic['input_features'].cuda(torch.cuda.current_device())
        audio_tensor = audio_tensor.to(dtype=torch.bfloat16)
        feature_attention_mask = dic['feature_attention_mask'].to(dtype=torch.bool)
        attention_mask = dic['attention_mask'].to(device).to(dtype=torch.bool)
        input_ids = dic['input_ids'].to(device).to(dtype=torch.int64)

        output = model(
            input_features=audio_tensor,
            feature_attention_mask=feature_attention_mask,
            input_ids=input_ids,
            attention_mask=attention_mask,
        )
        # Re-assemble the vocab-parallel last dimension across TP ranks so the
        # result can be compared against the (non-parallel) reference output.
        output = tensor_parallel.all_gather_last_dim_from_tensor_parallel_region(output)

# Script entry point: run the forward-pass smoke test when executed directly.
if __name__ == "__main__":
    debug_audio_llm()
