# modeling_Idealllm.py
import pdb
import sys
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
from transformers import (
    AutoModelForCausalLM,
    AutoModelForPreTraining,
    AutoModelForSpeechSeq2Seq,
    AutoProcessor,
    BertConfig,
    Qwen2AudioConfig,
    Qwen2AudioForConditionalGeneration,
    Qwen2AudioPreTrainedModel,
)
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import BaseModelOutput, ModelOutput

sys.path.append("/apdcephfs/share_976139/users/hongfeixue/workspace/MLC-SLM-Baseline")
from peft import LoraConfig, TaskType, get_peft_model
from wenet.transformer.search import DecodeResult, ctc_greedy_search

from configuration_IdealLLM_v2 import IdealLLMConfig

# Mapping from lowercase language name to integer language id; used for the
# LID classifier output dimension and to index per-language trainable prompts.
MLC_SLM_dict = {"english":0, "french":1, "german":2, "italian":3, "japanese":4, "korean":5, "portuguese":6, "russian":7, "spanish":8, "thai":9, "vietnamese":10}


# 定义一个模块来学习权重
# Module that learns one mixing weight per language.
class WeightLearner(torch.nn.Module):
    """Learnable per-language mixing weights, squashed into (0, 1).

    Holds one trainable scalar per language; ``forward`` returns the
    sigmoid of those scalars.
    """

    def __init__(self, num_languages):
        super(WeightLearner, self).__init__()
        # Pre-sigmoid values start at 0.5 for every language.
        initial = torch.full((num_languages,), 0.5)
        self.weights = torch.nn.Parameter(initial)

    def forward(self):
        """Return the weights mapped into the (0, 1) range."""
        return self.weights.sigmoid()
    
class DownSampleProjector(torch.nn.Module):
    """Temporal down-sampler plus feature projector.

    Reduces the time axis by ``downsample_rate`` with a small conv stack,
    then maps the features from ``idim`` to ``odim`` through a two-layer
    MLP followed by LayerNorm.

    Args:
        downsample_rate: factor by which the time dimension is reduced.
        idim: input feature dimension.
        odim: output feature dimension.
    """

    def __init__(self, downsample_rate: int, idim: int, odim: int):
        super().__init__()
        self.ds_rate = downsample_rate
        self.idim = idim
        self.odim = odim
        # Conv stack: a same-length conv + GELU, then a strided conv that
        # shrinks time by ds_rate while widening channels by ds_rate.
        self.conv = torch.nn.Sequential(
            torch.nn.Conv1d(idim, idim, kernel_size=3, padding=1),
            torch.nn.GELU(),
            torch.nn.Conv1d(idim, idim*self.ds_rate, kernel_size=3, stride=self.ds_rate, padding=1)
        )
        # MLP projection back down to idim and out to odim.
        self.linear_connector = torch.nn.Sequential(
            torch.nn.Linear(self.idim*self.ds_rate, self.idim),
            torch.nn.ReLU(),
            torch.nn.Linear(self.idim, self.odim),
            torch.nn.ReLU(),
        )
        self.layernorm = torch.nn.LayerNorm(self.odim)

    def forward(
        self,
        x: torch.Tensor
    ):
        """Down-sample ``x`` of shape (B, T, idim) and project to odim.

        Trailing frames are dropped so T is divisible by the rate; the
        result has shape (B, ~T/ds_rate, odim).
        """
        leftover = x.size(1) % self.ds_rate
        if leftover > 0:
            # Drop the tail frames that do not fill a full stride window.
            x = x[:, : x.size(1) - leftover, :]
        # Conv1d wants (B, D, T); transpose around the conv stack.
        hidden = self.conv(x.transpose(1, 2)).transpose(1, 2)
        projected = self.linear_connector(hidden)
        return self.layernorm(projected)

@dataclass
class Qwen2AudioCausalLMOutputWithPast(ModelOutput):
    """
    Base class for Qwen2Audio causal language model (or autoregressive) outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`)

            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        attention_mask (`torch.FloatTensor`, *optional*):
            Attentions mask, used to update attention mask and position_ids.
    """

    loss: Optional[torch.FloatTensor] = None
    # Annotated Optional since the field defaults to None before being filled.
    logits: Optional[torch.FloatTensor] = None
    past_key_values: Optional[List[torch.FloatTensor]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    attention_mask: Optional[torch.FloatTensor] = None

class IdealLLMModel(Qwen2AudioForConditionalGeneration, GenerationMixin):
    """Dual-encoder speech LLM.

    Runs a Whisper encoder and an MMS (wav2vec2-style) SSL encoder over the
    same waveform, fuses the two streams with per-language learned weights,
    down-samples the fused features 4x into the LLM embedding space, wraps
    them with trainable per-language prompt embeddings, and splices them
    into the text token stream at the <|AUDIO|> placeholders before running
    the causal language model.
    """

    config_class = IdealLLMConfig

    def __init__(self, config: IdealLLMConfig):
        super().__init__(config)
        # Special token ids for the Qwen tokenizer family.
        self.pad_token_id = 151643
        self.audio_token_index = 151646  # <|AUDIO|> placeholder token id
        self.ignore_index = -100  # label value ignored by the LM loss

        # self.speech_projection = IdealLLMAdapter(config)

        # Keep the original language-model part.
        # Load model directly.
        # NOTE(review): hard-coded local checkpoint paths; parameterize before reuse.
        self.processor = AutoProcessor.from_pretrained("/apdcephfs/share_976139/users/hongfeixue/model/whisper-large-v3")
        whisper_model = AutoModelForSpeechSeq2Seq.from_pretrained("/apdcephfs/share_976139/users/hongfeixue/model/whisper-large-v3")
        # Encoder #1: keep only the Whisper encoder; drop the rest of the
        # seq2seq model to save memory.
        self.speech_model1 = whisper_model.model.encoder
        del whisper_model
        # The Qwen2-Audio projector / audio tower from the parent class are unused.
        del self.multi_modal_projector
        del self.audio_tower
        # del self.speech_model1.layer_norm
        # pdb.set_trace()
        # if speech_model2 is not None:
        #     self.speech_model2 = speech_model2
        # else:
        #     self.speech_model2 = S3prlFrontend(
        #         frontend_conf = {"upstream": "wav2vec2_local", 
        #                         "upstream_model_config": None,
        #                         "upstream_ckpt": "/apdcephfs/share_976139/users/hongfeixue/model/mms_1b.converted.pt"},
        #         download_dir = "./hub",
        #         multilayer_feature = True,
        #     )
        # Projects MMS features (1024-d) into the Whisper feature space (1280-d).
        self.ssl2whisper = torch.nn.Linear(1024, 1280)
        # Encoder #2: MMS-1B self-supervised speech model.
        self.speech_model2 =  AutoModelForPreTraining.from_pretrained("/apdcephfs/share_976139/users/hongfeixue/model/mms-1b", return_dict=True)
        self.language_model = AutoModelForCausalLM.from_config(config.text_config)
        self.use_llm_lora = False  # LoRA path below is wired up but disabled
        if self.use_llm_lora:
            target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "up_proj","gate_proj", "down_proj"]
            peft_config = LoraConfig(
                task_type=TaskType.CAUSAL_LM, 
                inference_mode=True, 
                r=32, 
                lora_alpha=8, 
                lora_dropout=0.0,
                target_modules=target_modules,
            )
            self.language_model = get_peft_model(self.language_model, peft_config)
        transformer_layer = torch.nn.TransformerEncoderLayer(
            d_model=1280,
            nhead=8,
            dim_feedforward=2560,
            dropout=0.1,
            batch_first=True
        )
        # Two 2-layer Transformer-encoder adapters, one per speech branch.
        # PyTorch's TransformerEncoder deep-copies the given layer, so the
        # two adapters do NOT share weights despite the single template.
        self.speech_transformer_adapter1 = torch.nn.TransformerEncoder(transformer_layer, num_layers=2)
        self.speech_transformer_adapter2 = torch.nn.TransformerEncoder(transformer_layer, num_layers=2)

        # LID classifier over the fused utterance embedding, plus the
        # per-language fusion-weight learner.
        self.language_class = torch.nn.Linear(1280, len(MLC_SLM_dict.keys()))
        self.weight_network = WeightLearner(len(MLC_SLM_dict.keys()))
        # Two cascaded 2x temporal downsamplers: 1280 -> 2560 -> 4096 (LLM width).
        self.downsample_projector1 = DownSampleProjector(downsample_rate=2, idim=1280, odim=2560)
        self.downsample_projector2 = DownSampleProjector(downsample_rate=2, idim=2560, odim=4096)

        # NOTE(review): pad_id duplicates pad_token_id above, and
        # audio_token_index is re-assigned to the same value; audio_id
        # equals sep_id[0] — consider consolidating.
        self.pad_id = 151643 # or zero? <|endoftext|>, zero: !
        self.sep_id = [151644, 77091, 198] # <im_start>assistant\n
        self.audio_id = 151644
        self.end_of_turn = 151645 # eos
        self.audio_token_index = 151646
        # Each language owns num_prompts_per_lang trainable soft-prompt
        # embeddings at LLM width (4096); see prompt_wrap.
        self.num_prompts_per_lang = 5
        self.trainable_prompts = torch.nn.Embedding(len(MLC_SLM_dict) * self.num_prompts_per_lang, 4096)
        # CTC head over the 1280-d pre-projection speech features.
        self.ctc_linear = torch.nn.Linear(1280, 151670) #152065)
        from transformers import AutoTokenizer
        self.tokenizer = AutoTokenizer.from_pretrained("/apdcephfs/share_976139/users/hongfeixue/model/Qwen3-8B-base")

        # Qwen3 chat-template prompt fragments (used by the commented-out
        # prompt_wrap variant below).
        self.qwen3_prompt_head = "<|im_start|>user\nTranscribe the speech to text.<|im_end|>\n"
        self.qwen3_prompt_tail = "<|im_start|>assistant\n<think>\n\n</think>\n\n"
        self.qwen3_prompt_head_token = torch.tensor(self.tokenizer.encode(self.qwen3_prompt_head, add_special_tokens=False))
        self.qwen3_prompt_tail_token = torch.tensor(self.tokenizer.encode(self.qwen3_prompt_tail, add_special_tokens=False))

        # Whisper-native log-mel front-end parameters (128 mel bins).
        import whisper
        from whisper.audio import HOP_LENGTH, N_FFT, N_SAMPLES
        self.n_fft = N_FFT
        self.win_length = N_FFT
        self.hop_length = HOP_LENGTH
        self.n_mels = 128
        self.mel_filters = whisper.audio.mel_filters
        self.post_init()

    def prompt_wrap(self, speech_embeds, language_id):
        """Prepend the language-specific trainable prompt embeddings.

        Args:
            speech_embeds: (batch, T, 4096) projected speech embeddings.
            language_id: (batch,) integer language ids (values of MLC_SLM_dict).

        Returns:
            (batch, num_prompts_per_lang + T, 4096) wrapped embeddings.
        """
        batch_size = speech_embeds.size(0)
        # Row i selects the contiguous block of prompt slots for language_id[i].
        prompt_ids = language_id.unsqueeze(1) * self.num_prompts_per_lang + torch.arange(
            self.num_prompts_per_lang, device=speech_embeds.device
        )  # (batch_size, num_prompts_per_lang)
        prompt_embeds = self.trainable_prompts(prompt_ids)
        wrapped_embeds = torch.cat([prompt_embeds, speech_embeds], dim=1)
        return wrapped_embeds

    # def prompt_wrap(self, speech_embeds, language_id, speech_lens):
    #     head_embed = self.language_model.model.embed_tokens(self.qwen3_prompt_head_token.to(speech_embeds.device))
    #     tail_embed = self.language_model.model.embed_tokens(self.qwen3_prompt_tail_token.to(speech_embeds.device))

    #     batch_size = speech_embeds.size(0)
    #     head_embed = head_embed.unsqueeze(0).expand(batch_size, -1, -1)
    #     tail_embed = tail_embed.unsqueeze(0).expand(batch_size, -1, -1)

    #     prompt_ids = language_id.unsqueeze(1) * self.num_prompts_per_lang + torch.arange(
    #         self.num_prompts_per_lang, device=speech_embeds.device
    #     )  # (batch_size, 10)
    #     prompt_embeds = self.trainable_prompts(prompt_ids)
    #     wrapped_embeds = torch.cat([head_embed, prompt_embeds, speech_embeds, tail_embed], dim=1)
        
    #     speech_lens += head_embed.shape[1] + self.num_prompts_per_lang
    #     max_seq = wrapped_embeds.shape[1]
    #     positions = torch.arange(max_seq, device=speech_embeds.device).expand(batch_size, max_seq)
    #     speech_masks = positions <= speech_lens.unsqueeze(1)
    #     tail_masks = positions > max(speech_lens)
    #     # speech_masks = torch.ones(batch_size, speech_lens, device=speech_embeds.device)
    #     return wrapped_embeds, speech_masks | tail_masks

    def log_mel_spectrogram(
        self,
        audio: torch.Tensor,
        ilens: torch.Tensor = None,
    ) -> torch.Tensor:
        """Use log-mel spectrogram computation native to Whisper training.

        Args:
            audio: (batch, num_samples) raw waveform batch.
            ilens: optional (batch,) waveform lengths in samples.

        Returns:
            Tuple (log_spec, olens): (batch, n_mels, frames) normalized
            log-mel features, and frame lengths (ilens // hop_length), or
            None when ilens is None.
        """
        window = torch.hann_window(self.win_length).to(audio.device)
        stft = torch.stft(
            audio, self.n_fft, self.hop_length, window=window, return_complex=True
        )

        # whisper deletes the last frame by default (Shih-Lun)
        magnitudes = stft[..., :-1].abs() ** 2

        filters = self.mel_filters(audio.device, self.n_mels)
        mel_spec = filters @ magnitudes

        log_spec = torch.clamp(mel_spec, min=1e-10).log10()

        if ilens is not None:
            olens = ilens // self.hop_length
        else:
            olens = None

        # Whisper normalization: clamp the dynamic range to 8 below the
        # per-utterance max, then rescale.
        log_spec = torch.maximum(
            log_spec,
            log_spec.view(audio.size(0), -1).max(dim=-1)[0][:, None, None] - 8.0,
        )
        log_spec = (log_spec + 4.0) / 4.0

        return log_spec, olens

    def dual_encoder(self, input_features, raw_wav_lengths, feature_attention_mask):
        """Run both speech encoders and fuse them with learned LID weights.

        Args:
            input_features: (batch, num_samples) raw waveforms.
            raw_wav_lengths: (batch,) waveform lengths in samples.
            feature_attention_mask: waveform-level mask forwarded to the
                SSL encoder.

        Returns:
            Tuple (speech_embeds, lid_embeds, speech_lens): fused
            (batch, T, 1280) embeddings, (batch, num_languages) LID logits,
            and valid frame counts at the Whisper-encoder frame rate.
        """
        feats, feats_lens = self.log_mel_spectrogram(input_features, raw_wav_lengths)
        feats = feats.to(torch.bfloat16)
        max_len = feats.size(-1)
        positions = torch.arange(max_len, device=feats_lens.device).expand(len(feats_lens), max_len)
        # Expand the lengths tensor to [batch_size, max_len].
        lengths_expanded = feats_lens.unsqueeze(1).expand(-1, max_len)
        # Build the mask matrix (1 where position < length, 0 on padding).
        feats_mask = (positions < lengths_expanded).float()
        # encoder_out, encoder_lens = self.speech_model1(feats, feats_lens) # 2 times subsampling
        audio_feat_lengths, audio_output_lengths = self._get_feat_extract_output_lengths(
                feats_mask.sum(-1)
            )
        batch_size, _, max_mel_seq_len = feats.shape
        # Frame count after Whisper's stride-2 conv front-end.
        max_seq_len = (max_mel_seq_len - 2) // 2 + 1

        if max_mel_seq_len % 2 == 1:
            max_seq_len += 1
        # Create a sequence tensor of shape (batch_size, max_seq_len)
        seq_range = (
            torch.arange(0, max_seq_len, dtype=audio_feat_lengths.dtype, device=audio_feat_lengths.device)
            .unsqueeze(0)
            .expand(batch_size, max_seq_len)
        )

        lengths_expand = audio_feat_lengths.unsqueeze(1).expand(batch_size, max_seq_len)
        # Create mask (True marks padding positions).
        padding_mask = seq_range >= lengths_expand

        audio_attention_mask_ = padding_mask.view(batch_size, 1, 1, max_seq_len)

        audio_attention_mask = audio_attention_mask_.to(
            dtype=torch.bool, device=self.speech_model1.conv1.weight.device
        )

        encoder_out = self.speech_model1(feats, attention_mask=audio_attention_mask)
        encoder_out = encoder_out.last_hidden_state

        # Transformer Encoder src mask
        # Collapse (B, 1, 1, T) back to (B, T); True == padding, matching
        # src_key_padding_mask semantics.
        speech_mask = audio_attention_mask.squeeze(1)
        speech_mask = speech_mask.squeeze(1)
        speech_embeds = self.speech_transformer_adapter1(encoder_out, src_key_padding_mask=speech_mask)
        speech_lens = (~speech_mask).sum(dim=-1)
        # Utterance-level embedding for language identification.
        # NOTE(review): mean is over ALL frames, including padding — confirm intended.
        lid_embeds = torch.mean(speech_embeds, dim=1)

        # The SSL encoder runs in float32; its output is cast back to bfloat16.
        self.speech_model2.to(torch.float32)
        speech_mask2 = feature_attention_mask
        encoder_out2 = self.speech_model2(input_features.to(torch.float32), speech_mask2).projected_states
        encoder_out2 = encoder_out2.to(torch.bfloat16)
        speech_embeds2 = self.ssl2whisper(encoder_out2)
        # Reuse the Whisper-frame-rate padding mask for branch 2 after the
        # length alignment below.
        speech_mask2 = speech_mask
        # Align branch 2's length to branch 1: pad with branch-1 tail frames,
        # or truncate, so the two streams can be mixed element-wise.
        if speech_embeds.shape[1] > speech_embeds2.shape[1]:
            speech_embeds2 = torch.cat((speech_embeds2, speech_embeds[:, speech_embeds2.shape[1]:, :]), dim=1)
        elif speech_embeds.shape[1] < speech_embeds2.shape[1]:
            speech_embeds2 = speech_embeds2[:, :speech_embeds.shape[1], :]

        # Transformer Encoder src mask
        speech_embeds2 = self.speech_transformer_adapter2(speech_embeds2, src_key_padding_mask=speech_mask2)
        lid_embeds2 = torch.mean(speech_embeds2, dim=1)

        # 1.1 LID fusion: dual speech embeds -> mixed speech embeds.
        lid_embeds = lid_embeds + lid_embeds2
        lid_embeds = self.language_class(lid_embeds)

        language_probs = torch.softmax(lid_embeds, dim=-1)  # (N, num_languages)
        _, predicted_lid = torch.max(language_probs, dim=1)  # (N,)
        # Per-language mixing weight in (0, 2); note weight1 + weight2 == 2.
        current_weights = self.weight_network() * 2  # (N,)
        selected_weights = current_weights[predicted_lid]
        weight1 = 2 - selected_weights.unsqueeze(1)
        weight2 = selected_weights.unsqueeze(1)

        # Expand the weights to match the speech embedding dimensions.
        B, T, C = speech_embeds.shape
        weight1 = weight1.unsqueeze(1)  # (B, 1, 1)
        weight1 = weight1.expand(B, T, C)  # (B, T, C)

        weight2 = weight2.unsqueeze(1)  # (B, 1, 1)
        weight2 = weight2.expand(B, T, C)  # (B, T, C)
        speech_embeds = weight1 * speech_embeds + weight2 * speech_embeds2
        return speech_embeds, lid_embeds, speech_lens

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        input_features: torch.FloatTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        feature_attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        lid: Optional[torch.Tensor] = None,
    ):
        """Encode audio, splice it into the text embedding stream at the
        <|AUDIO|> placeholders, and run the language model.

        Args:
            input_ids: (batch, seq) token ids containing <|AUDIO|> placeholders.
            input_features: (batch, num_samples) raw waveforms.
            attention_mask / feature_attention_mask: text / waveform masks.
            labels: LM targets; per the inline note they are pre-expanded by
                the dataloader to the merged audio+text length.
            lid: (batch,) language ids used to select the trainable prompts.

        Returns:
            Qwen2AudioCausalLMOutputWithPast (or tuple when return_dict is
            False), with the LM loss when labels are given.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        target_device = self.language_model.device

        if inputs_embeds is None:
            # 1. Extract the input embeddings
            inputs_embeds = self.get_input_embeddings()(input_ids)

        # Merge audio only on the prefill step; a single-token input_ids
        # indicates a cached decoding step.
        if input_features is not None and input_ids.shape[1] != 1:
            input_features = input_features.to(target_device)
            feature_attention_mask = feature_attention_mask.to(target_device)
            # 1. Dual Encoder -> Dual speech embeds, lid embeds
            # NOTE(review): (~feature_attention_mask).sum(-1) counts the ZERO
            # entries of the mask; if the mask is 1-for-valid this passes the
            # padding length, not the audio length — verify the mask convention.
            speech_embeds, lid_embeds, speech_lens = self.dual_encoder(input_features, (~feature_attention_mask).sum(-1), feature_attention_mask)
            # ctc_proj = self.ctc_linear(speech_embeds).transpose(0, 1).to(torch.float32)
            # ctc_proj = ctc_proj.log_softmax(dim=-1)
            # ctc_greedy_results = ctc_greedy_search(
            #         ctc_proj.transpose(0, 1), speech_lens, 151669)
            # for ctc_result in ctc_greedy_results:
            #     ctc_tokens = ctc_result.tokens
            #     print(self.tokenizer.decode(ctc_tokens, skip_special_tokens=True))

            # 2. Projector
            speech_embeds = self.downsample_projector1(speech_embeds)
            speech_embeds = self.downsample_projector2(speech_embeds)
            # Track the lengths through the two 2x downsamplers (4x total).
            speech_lens = (speech_lens - 2) // 2 + 1  # first 2x downsample
            speech_lens = (speech_lens - 2) // 2 + 1  # second 2x downsample

            # 3. wrap speech_embeds with prompts)
            speech_embeds = self.prompt_wrap(speech_embeds, lid)
            speech_lens += self.num_prompts_per_lang

            # # 4. prepare inputs for llm
            audio_features = speech_embeds
            audio_output_lengths = speech_lens
            # labels is processed in dataloader
            # NOTE(review): the merged labels returned by the helper are
            # discarded (_); the loss below uses the dataloader-provided
            # `labels`, which must already match the merged length.
            inputs_embeds, attention_mask, _, position_ids, final_input_ids = self._merge_input_ids_with_audio_features(
                audio_features, audio_output_lengths, inputs_embeds, input_ids, attention_mask, labels
            )
            # batch_size, _, embed_size = inputs_embeds.shape
            # padding_length = 256 - inputs_embeds.shape[1]
            # zero_tensor = torch.zeros(batch_size, padding_length, embed_size, device=inputs_embeds.device).bfloat16()
            # inputs_embeds = torch.cat((inputs_embeds, zero_tensor), dim=1)
            # padding_mask = torch.zeros(batch_size, padding_length, device=attention_mask.device).bfloat16()
            # attention_mask = torch.cat((attention_mask, padding_mask), dim=1)
            # position_ids = torch.arange(512).unsqueeze(0).to(attention_mask.device).bfloat16()               # [1, 117]

        else:
            # NOTE(review): the incoming attention_mask is dropped on decode
            # steps — confirm the generation path supplies positions via cache.
            attention_mask = None
        # Language-model forward pass.
        outputs = self.language_model(
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict
        )

        logits = outputs[0]

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            if attention_mask is not None:
                shift_attention_mask = attention_mask[..., 1:]
                shift_logits = logits[..., :-1, :][shift_attention_mask.to(logits.device) != 0].contiguous()
                shift_labels = labels[..., 1:][shift_attention_mask.to(labels.device) != 0].contiguous()
            else:
                shift_logits = logits[..., :-1, :].contiguous()
                shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(
                shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1).to(shift_logits.device)
            )

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return Qwen2AudioCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            attention_mask=None, #attention_mask,
        )


    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
        """
        Computes the output length of the convolutional layers and the output length of the audio encoder

        Returns:
            Tuple (input_lengths, output_lengths): the lengths unchanged,
            and the lengths after 2x pooling (input_lengths // 2).
        """
        # avg pooling only
        output_lengths = input_lengths // 2
        return input_lengths, output_lengths

    def _merge_input_ids_with_audio_features(
        self, audio_features, num_audio_tokens, inputs_embeds, input_ids, attention_mask, labels
    ):
        """
        Merge input_ids with with audio features into final embeddings

        Args:
            audio_features (`torch.Tensor` of shape `(num_audios, max_audio_tokens, embed_dim)`):
                All audio vectors of all audios in the batch
            num_audio_tokens (`torch.LongTensor` of shape `(num_audios)`):
                The length of audio embeddings of each audio as stacked in `audio_features`
            inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, embed_dim)`):
                Token embeddings before merging with audio embeddings
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Input_ids of tokens, possibly filled with audio token
            attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Mask to avoid performing attention on padding token indices.
            labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*)
                labels need to be recalculated to support training (if provided)
        Returns:
            final_embedding, final_attention_mask, final_labels, position_ids, final_input_ids

        Explanation:
            each audio has variable length embeddings, with length specified by num_audio_tokens
            audio_features is concatenation of all audio embed vectors
            task: fill each <|AUDIO|> with the correct number of audio embeddings
            Example:
                X (5 tokens), Y (3 tokens), Z (8 tokens)
                X, Y are in the same sequence (in-context learning)
            if right padding
                input_ids: [
                    a b c d e f X g h i j k Y l m
                    o p q r Z s t u v _ _ _ _ _ _
                ]
                input_ids should be: [
                    a b c d e f X X X X X g h i j k Y Y Y l m
                    o p q r Z Z Z Z Z Z Z Z s t u v _ _ _ _ _
                ]
                labels should be: [
                    a b c d e f _ _ _ _ _ g h i j k _ _ _ l m
                    o p q r _ _ _ _ _ _ _ _ s t u v _ _ _ _ _
                ]
            elif left padding
                input_ids: [
                    a b c d e f X g h i j k Y l m
                    _ _ _ _ _ _ o p q r Z s t u v
                ]
                input_ids should be: [
                    a b c d e f X X X X X g h i j k Y Y Y l m
                    _ _ _ _ _ o p q r Z Z Z Z Z Z Z Z s t u v
                ]
                labels should be: [
                    a b c d e f _ _ _ _ _ g h i j k _ _ _ l m
                    _ _ _ _ _ o p q r _ _ _ _ _ _ _ _ s t u v
                ]
            Edge cases:
                * If tokens are same but audio token sizes are different, then cannot infer left or right padding
                ```python
                url1 = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3"
                audio1, _ = librosa.load(BytesIO(urlopen(url1).read()), sr=processor.feature_extractor.sampling_rate)
                url2 = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/f2641_0_throatclearing.wav"
                audio2, _ = librosa.load(BytesIO(urlopen(url2).read()), sr=processor.feature_extractor.sampling_rate)
                prompts = [
                    "[INST] <|AUDIO|>\nWhat is that in this audio? [/INST]",
                    "[INST] <|AUDIO|>\nWhat is that in this audio? [/INST]",
                ]
                inputs = processor(text=prompts, audios=[audio1, audio2], return_tensors='pt', padding=True).to("cuda")
                    audio1 has 101 tokens, while audio2 has 72 tokens
                ```

                input_ids: [
                    a b c d X g h
                    i j Y k l m n
                ]
                where X is 3 tokens while Y is 5, this mean after merge
                if left-padding (batched generation)
                    input_ids should be: [
                        _ _ a b c d X X X g h
                        i j Y Y Y Y Y k l m n
                    ]
                elif (right padding) (training)
                    input_ids should be: [
                        a b c d X X X g h _ _
                        i j Y Y Y Y Y k l m n
                    ]
        """
        num_audios, max_audio_tokens, embed_dim = audio_features.shape
        # Boolean mask selecting the valid (non-padded) frames of each audio.
        audio_features_mask = torch.arange(max_audio_tokens).expand(num_audios, max_audio_tokens).to(
            num_audio_tokens.device
        ) < num_audio_tokens.unsqueeze(1)
        masked_audio_features = audio_features[audio_features_mask].view(-1, embed_dim)
        batch_size, sequence_length = input_ids.shape
        _left_padding = torch.any(attention_mask[:, 0] == 0)
        _right_padding = torch.any(attention_mask[:, -1] == 0)

        # Infer padding side from the attention mask (defaults to left when
        # batch_size == 1, where either interpretation is equivalent).
        left_padding = True
        if batch_size > 1:
            if _left_padding and not _right_padding:
                left_padding = True
            elif not _left_padding and _right_padding:
                left_padding = False
            elif not _left_padding and not _right_padding:
                # both side is 1, so cannot tell
                #left_padding = self.padding_side == "left"
                left_padding = False
            else:
                # invalid attention_mask
                raise ValueError(f"both side of attention_mask has zero, invalid. {attention_mask}")

        # 1. Create a mask to know where special audio tokens are
        special_audio_token_mask = input_ids == self.audio_token_index
        num_special_audio_tokens = torch.sum(special_audio_token_mask, dim=-1)

        # In case the Audio model or the Language model has been offloaded to CPU, we need to manually
        # set the corresponding tensors into their correct target device.
        target_device = inputs_embeds.device
        attention_mask = attention_mask.to(target_device)
        input_ids = input_ids.to(target_device)
        num_audio_tokens = num_audio_tokens.to(target_device)
        batch_indices, non_audio_indices = torch.where(
            (input_ids != self.audio_token_index) & (attention_mask == 1)
        )

        # 2. Compute the positions where text should be written
        # Calculate new positions for text tokens in merged audio-text sequence.
        # `special_audio_token_mask` identifies audio tokens. Each audio token will be replaced by `audio_feat_lengths - 1` text tokens.
        # `torch.cumsum` computes how each audio token shifts subsequent text token positions.
        token_placeholder_num = torch.zeros_like(input_ids)
        token_placeholder_num[special_audio_token_mask] = num_audio_tokens.long() - 1
        token_placeholder_num = token_placeholder_num + 1
        new_token_positions = torch.cumsum(token_placeholder_num, -1) - 1
        max_token_num = token_placeholder_num.sum(-1).max()
        nb_audio_pad = max_token_num - 1 - new_token_positions[:, -1]
        if left_padding:
            new_token_positions += nb_audio_pad[:, None]  # offset for left padding
        text_to_overwrite = new_token_positions[batch_indices, non_audio_indices]
        batch_indices, non_audio_indices, text_to_overwrite = (
            batch_indices.to(target_device),
            non_audio_indices.to(target_device),
            text_to_overwrite.to(target_device),
        )

        # 3. Create the full embedding, already padded to the maximum position
        final_embedding = torch.zeros(
            batch_size, max_token_num, embed_dim, dtype=inputs_embeds.dtype, device=inputs_embeds.device
        )
        final_attention_mask = torch.zeros(
            batch_size, max_token_num, dtype=attention_mask.dtype, device=inputs_embeds.device
        )
        final_input_ids = torch.full(
            (batch_size, max_token_num), self.pad_token_id, dtype=input_ids.dtype, device=inputs_embeds.device
        )

        # 4. Fill the embeddings based on the mask. If we have ["hey" "<audio>", "how", "are"]
        # we need to index copy on [0, 577, 578, 579] for the text and [1:576] for the audio features
        final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[batch_indices, non_audio_indices]

        final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[batch_indices, non_audio_indices]
        final_input_ids[batch_indices, text_to_overwrite] = input_ids[batch_indices, non_audio_indices]
        final_labels = None
        if labels is not None:
            labels = labels.to(target_device)
            final_labels = torch.full_like(final_attention_mask, self.ignore_index).to(torch.long)
            final_labels[batch_indices, text_to_overwrite] = labels[batch_indices, non_audio_indices]

        # 5. Fill the embeddings corresponding to the audios. Anything that is still zeros needs filling
        audio_to_overwrite = torch.full(
            (batch_size, max_token_num), True, dtype=torch.bool, device=inputs_embeds.device
        )
        audio_to_overwrite[batch_indices, text_to_overwrite] = False
        seq_indices = torch.arange(max_token_num).unsqueeze(0).to(target_device)
        seq_indices = seq_indices.expand(batch_size, max_token_num)

        if left_padding:
            # exclude padding on the left
            max_token_num = max_token_num.to(target_device)
            val = (max_token_num - seq_indices) <= (
                token_placeholder_num.sum(-1) - (attention_mask == 0).long().sum(-1)
            )[:, None]
        else:
            # exclude padding on the right
            val = seq_indices < (token_placeholder_num.sum(-1) - (attention_mask == 0).long().sum(-1))[:, None]

        audio_to_overwrite &= val
        if audio_to_overwrite.sum() != num_audio_tokens.sum():
            
            raise ValueError(
                f"The input provided to the model are wrong. The number of audio tokens is {num_special_audio_tokens} while"
                f" the number of audio given to the model is {num_audios}. This prevents correct indexing and breaks batch generation."
            )

        final_embedding[audio_to_overwrite] = (
            masked_audio_features.contiguous().reshape(-1, embed_dim).to(target_device)
        )
        final_attention_mask |= audio_to_overwrite
        # Positions count only attended tokens; padding positions get 1.
        position_ids = (final_attention_mask.cumsum(-1) - 1).masked_fill_((final_attention_mask == 0), 1)

        # print(f"final_embedding: {final_embedding.shape}")
        # print(f"final_attention_mask: {final_attention_mask.shape}")
        # print(f"final_labels: {final_labels.shape}")
        # print(f"sequence_length: {sequence_length}")
        # torch.set_printoptions(threshold=float('inf'))


        # since input text is padded to seq_len, all inputs should be truncated to seq_len
        # final_embedding = final_embedding[:,:sequence_length,:]
        # final_attention_mask = final_attention_mask[:, :sequence_length]
        # if final_labels is not None:
        #     final_labels = final_labels[:,:sequence_length]
        # position_ids = position_ids[:, :sequence_length]
        # final_input_ids = final_input_ids[:, :sequence_length]
        # if parallel_state.get_tensor_model_parallel_rank() == 0:
        #     print("after: ", final_input_ids)
        return final_embedding, final_attention_mask, final_labels, position_ids, final_input_ids


class IdealLLMForConditionalGeneration(IdealLLMModel):
    """Conditional-generation entry point; a thin alias over IdealLLMModel
    that adds no behavior of its own."""

    def __init__(self, config: IdealLLMConfig):
        super().__init__(config)
        # self.language_model = Qwen2AudioForCausalLM(config)