import logging
import os
import sys

sys.path.append('../../')
import torch
from peft import LoraConfig, TaskType, get_peft_model
from torch import nn
from transformers import AutoModelForCausalLM, AutoTokenizer

from wenet.transformer.encoder import TransformerEncoder
from wenet.transformer.ctc import CTC

from wenet.utils.common import add_sos_eos, add_sos_eos4speech_llm
# from wenet.utils.gxl_utils import Whisper_Utils
import soundfile as sf
from wenet.transformer.search import ctc_greedy_search


# Language name -> class index for the 8 MLS (Multilingual LibriSpeech) languages;
# used by the LID classifiers and the per-language fusion weights below.
mls_dict = {"dutch":0, "french":1, "italian":2, "portuguese":3, "english":4, "german":5, "polish":6, "spanish":7}
# Reference per-language values in mls_dict order (presumably learned fusion
# weights from a previous run — TODO confirm provenance):
# tensor([0.9834, 0.9529, 0.9800, 0.9926, 0.9152, 0.9473, 1.0444, 0.9617]

class GxlConv1dSubsampling2(nn.Module):
    """Conv1d-based ~2x temporal subsampling.

    Two Conv1d layers (kernel 3, strides 1 then 2), each followed by GELU,
    shrink the time axis by roughly a factor of 2.

    Args:
        idim (int): Input feature dimension.
        odim (int): Output feature dimension.
    """

    def __init__(self, idim: int, odim: int):
        """Build the convolutional stack."""
        super().__init__()
        layers = [
            torch.nn.Conv1d(idim, odim, 3, 1),
            torch.nn.GELU(),
            torch.nn.Conv1d(odim, odim, 3, 2),
            torch.nn.GELU(),
        ]
        self.conv = torch.nn.Sequential(*layers)

    def forward(self, x):
        """Subsample along the time axis.

        Args:
            x: Tensor of shape (B, T, idim).

        Returns:
            Tensor of shape (B, T', odim) with T' approximately T / 2.
        """
        # Conv1d wants (B, C, T); swap back to (B, T, C) afterwards.
        out = self.conv(x.transpose(1, 2))
        return out.transpose(1, 2)

class GxlConv1dSubsampling4(nn.Module):
    """Conv1d-based ~4x temporal subsampling.

    Three Conv1d layers (kernel 3, strides 1, 2, 2), each followed by GELU,
    shrink the time axis by roughly a factor of 4.

    Args:
        idim (int): Input feature dimension.
        odim (int): Output feature dimension.
    """

    def __init__(self, idim: int, odim: int):
        """Build the convolutional stack."""
        super().__init__()
        modules = []
        in_ch = idim
        # Strides multiply to the total subsampling factor (1 * 2 * 2 = 4).
        for stride in (1, 2, 2):
            modules.append(torch.nn.Conv1d(in_ch, odim, 3, stride))
            modules.append(torch.nn.GELU())
            in_ch = odim
        self.conv = torch.nn.Sequential(*modules)

    def forward(self, x):
        """Subsample along the time axis.

        Args:
            x: Tensor of shape (B, T, idim).

        Returns:
            Tensor of shape (B, T', odim) with T' approximately T / 4.
        """
        # Conv1d wants (B, C, T); swap back to (B, T, C) afterwards.
        out = self.conv(x.transpose(1, 2))
        return out.transpose(1, 2)

class GxlConv1dSubsampling6(nn.Module):
    """Conv1d-based ~6x temporal subsampling.

    Three Conv1d layers (kernel 3, strides 1, 2, 3), each followed by GELU,
    shrink the time axis by roughly a factor of 6.

    Args:
        idim (int): Input feature dimension.
        odim (int): Output feature dimension.
    """

    def __init__(self, idim: int, odim: int):
        """Build the convolutional stack."""
        super().__init__()
        modules = []
        in_ch = idim
        # Strides multiply to the total subsampling factor (1 * 2 * 3 = 6).
        for stride in (1, 2, 3):
            modules.append(torch.nn.Conv1d(in_ch, odim, 3, stride))
            modules.append(torch.nn.GELU())
            in_ch = odim
        self.conv = torch.nn.Sequential(*modules)

    def forward(self, x):
        """Subsample along the time axis.

        Args:
            x: Tensor of shape (B, T, idim).

        Returns:
            Tensor of shape (B, T', odim) with T' approximately T / 6.
        """
        # Conv1d wants (B, C, T); swap back to (B, T, C) afterwards.
        out = self.conv(x.transpose(1, 2))
        return out.transpose(1, 2)


class GxlConv1dSubsampling8(nn.Module):
    """Conv1d-based ~8x temporal subsampling.

    Four Conv1d layers (kernel 3, strides 1, 2, 2, 2), each followed by GELU,
    shrink the time axis by roughly a factor of 8.

    Fix: the last conv previously used stride 8, which (a) exceeds the
    kernel size 3, silently skipping frames entirely, and (b) made the total
    subsampling factor 2*2*8 = 32x, inconsistent with the sibling classes
    where the name suffix equals the factor (2 -> 2x, 4 -> 4x, 6 -> 6x).
    The stride is now 2, so the total factor is 2*2*2 = 8x as named.

    Args:
        idim (int): Input feature dimension.
        odim (int): Output feature dimension.
    """

    def __init__(self, idim: int, odim: int):
        """Construct an Conv1dSubsampling object."""
        super().__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv1d(idim, odim, 3, 1),
            torch.nn.GELU(),
            torch.nn.Conv1d(odim, odim, 3, 2),
            torch.nn.GELU(),
            torch.nn.Conv1d(odim, odim, 3, 2),
            torch.nn.GELU(),
            # stride 2 (was 8): total subsampling is now 8x, matching the name.
            torch.nn.Conv1d(odim, odim, 3, 2),
            torch.nn.GELU(),
        )

    def forward(self, x):
        """Subsample along the time axis.

        Args:
            x: Tensor of shape (B, T, idim).

        Returns:
            Tensor of shape (B, T', odim) with T' approximately T / 8.
        """
        x = x.transpose(1, 2)  # (B, T, C) -> (B, C, T) for Conv1d
        x = self.conv(x)
        x = x.transpose(1, 2)  # back to (B, T', C)
        return x

def gxl_cat_case_by_case(input_tensors_list, control_list):
    """Select tensors by index and concatenate them along dim 0.

    Args:
        input_tensors_list: Sequence of tensors to pick from.
        control_list: Indices into ``input_tensors_list``, in the order they
            should appear in the output (repeats allowed).

    Returns:
        The selected tensors concatenated along dimension 0.
    """
    selected = [input_tensors_list[idx] for idx in control_list]
    return torch.cat(selected, dim=0)


class GatedNetwork(nn.Module):
    """Element-wise gate computed from a language embedding and two speech embeddings.

    The language embedding is broadcast across time, concatenated with both
    speech embeddings along the feature axis, and passed through a single
    linear layer with a sigmoid to produce a gate in (0, 1).
    """

    def __init__(self, input_dim):
        super(GatedNetwork, self).__init__()
        self.fc = nn.Linear(input_dim * 3, input_dim)

    def forward(self, language_embedding, speech_embedding_1, speech_embedding_2):
        """Return a sigmoid gate of shape (B, T, N).

        Args:
            language_embedding: broadcastable to (B, T, N), e.g. (B, 1, N).
            speech_embedding_1: (B, T, N).
            speech_embedding_2: (B, T, N).
        """
        batch, time_len, feat = speech_embedding_1.size()
        # Broadcast the language embedding across the time axis.
        lang = language_embedding.expand(batch, time_len, feat)
        fused = torch.cat((lang, speech_embedding_1, speech_embedding_2), dim=-1)
        # Linear + sigmoid -> per-element gate values in (0, 1).
        return torch.sigmoid(self.fc(fused))

class WeightNetwork(nn.Module):
    """Predict a pair of fusion weights (summing to 2) for two speech embeddings.

    Each speech embedding is mean-pooled over time, mapped into the
    language-class space, concatenated with the LID embedding, and scored by
    a linear + sigmoid head; the two scores are softmax-normalized across the
    pair and scaled so that weight1 + weight2 == 2 per sample.
    """

    def __init__(self, input_dim):
        super(WeightNetwork, self).__init__()
        self.fc1 = nn.Linear(input_dim * 2, 1)
        self.fc2 = nn.Linear(input_dim * 2, 1)
        self.language_class1 = nn.Linear(1280, len(mls_dict.keys()))
        self.language_class2 = nn.Linear(1280, len(mls_dict.keys()))

    def forward(self, lid_embedding, speech_embedding_1, speech_embedding_2):
        """Return two (N, 1) weights whose per-row sum is 2.

        Args:
            lid_embedding: (N, 1, input_dim) language-ID embedding.
            speech_embedding_1: (N, T, 1280).
            speech_embedding_2: (N, T, 1280).
        """
        # Pool each speech embedding over time, then project to class space.
        pooled1 = self.language_class1(speech_embedding_1.mean(dim=1))  # (N, classes)
        pooled2 = self.language_class2(speech_embedding_2.mean(dim=1))  # (N, classes)
        lid = lid_embedding.squeeze(1)  # (N, 1, C) -> (N, C)
        # Score each encoder independently with its own head.
        raw1 = torch.sigmoid(self.fc1(torch.cat((lid, pooled1), dim=-1)))  # (N, 1)
        raw2 = torch.sigmoid(self.fc2(torch.cat((lid, pooled2), dim=-1)))  # (N, 1)
        # Softmax over the pair, then scale so the two weights sum to 2.
        pair = torch.softmax(torch.cat((raw1, raw2), dim=-1), dim=-1) * 2  # (N, 2)
        return pair[:, 0:1], pair[:, 1:2]

# 定义一个模块来学习权重
class WeightLearner(nn.Module):
    """Hold one learnable fusion weight per language, exposed through a sigmoid."""

    def __init__(self, num_languages):
        super(WeightLearner, self).__init__()
        # Raw (pre-sigmoid) weights, all initialized to 0.5.
        initial = torch.full((num_languages,), 0.5)
        self.weights = nn.Parameter(initial)

    def forward(self):
        # Squash the raw weights into the (0, 1) range.
        return torch.sigmoid(self.weights)

class Salmonn_Model(nn.Module):
    def __init__(self, encoder, encoder2, llm_path, lora=True, lora_alpha=32,
                 lora_rank=8, lora_dropout=0.1, low_resource=False,
                 prompt=None,# (translated) no new prompt value is passed in at call time
                #  prompt_pattern="<|user|>\nspeech : <SpeechHere>\nThe speech language is <SpeechHere>\n{}<|end|>\n<|assistant|>",
                 prompt_pattern="<|user|>\nspeech : <SpeechHere>\n{}<|end|>\n<|assistant|>",
                 # <|user|>\nQuestion<|end|>\n<|assistant|>
                 llama_model_generate_max_length=200, llama_model_generate_min_length=1,
                 llama_model_generate_num_beams=4, llama_model_generate_do_sample=True, llama_model_generate_top_p=0.9,
                 llama_model_generate_repetition_penalty=1.0, llama_model_generate_length_penalty=1.0,
                 llama_model_generate_temperature=1.0, load_epoch_ckpt=False, load_step_ckpt=False,
                 load_eval_ckpt=False, ckpt_path="", is_inference=False, downsample_rate=1,ctc_weight=0,*args, **kwargs):
        """Assemble the speech-to-LLM model.

        Wires together: one or two speech encoders, a linear dimension adapter,
        a TransformerEncoder connector, a CTC head, optional Conv1d temporal
        downsampling, a two-layer projection into the LLM embedding space, and
        a (optionally LoRA-wrapped) causal LLM with its tokenizer.

        Args:
            encoder: primary speech encoder; must expose output_size().
            encoder2: optional second speech encoder; enables the fusion
                branch when not None.
            llm_path: HuggingFace path/dir of the causal LLM and tokenizer.
            lora: wrap the LLM with LoRA adapters when True.
            lora_alpha / lora_rank / lora_dropout: LoRA hyper-parameters.
            low_resource: load the LLM in 8-bit with device_map="auto".
            prompt: default prompt (string, or list indexed by task id).
            prompt_pattern: template containing the '<SpeechHere>' placeholder.
            llama_model_generate_*: generation hyper-parameters (stored).
            load_*_ckpt / ckpt_path: checkpoint-loading flags (stored only).
            is_inference: selects float32 LLM weights and LoRA inference mode.
            downsample_rate: 1/2/4/6/8 — selects the Conv1d subsampling module.
            ctc_weight: weight of the CTC loss in the total loss (0 disables CTC).
        """
        super().__init__()
        self.downsample_rate = downsample_rate
        self.prompt = prompt

        self.speech_encoder = encoder
        self.speech_encoder2 = encoder2

        # (translated) hubert's dim is 1024, whisper's is 1280; convert via a linear layer.
        """
        hubert的dim是1024， whisper的dim的1280， 通过线性层转换
        """
        self.encoder_type = kwargs.get("encoder_type", "whisper")
        # NOTE(review): this reads the "encoder_type" kwarg, not "encoder2_type" —
        # looks like a copy-paste slip; confirm the intended key.
        self.encoder2_type = kwargs.get("encoder_type", None)
        logging.info(f'耿雪龙： encoder_type: {self.encoder_type}')
        # Adapter: only hubert (1024-d) needs projecting to 1280; whisper passes through.
        self.hubert_dim2whisper_dim = nn.Linear(encoder.output_size(),
                                                1280) if self.encoder_type == "hubert" else nn.Identity()
        # Applied after speech_llama_proj2, so 3072 is assumed to equal the LLM
        # hidden size — TODO confirm against the model at llm_path.
        self.ln_speech = nn.LayerNorm(3072) # 3072
        self.transformer_num_blocks = 9

        # Connector layer between encoder and LLM, ~51.6M params.
        self.speech_transformer = TransformerEncoder(
           input_size=1280,
           output_size=1280,
           attention_heads=4,
           linear_units=2560,
           num_blocks=self.transformer_num_blocks,
           dropout_rate=0.1,
           positional_dropout_rate=0.1,
           attention_dropout_rate=0.0,
           input_layer="linear",
           pos_enc_layer_type="abs_pos",
           normalize_before=True
        )

        # LLM,
        # Atom-7B , 6,684.066406M params, llama-7b
        """
        atom-7b 模型介绍：
        特征维度： 4096，
        词数： 65000
        encoder_layers_num: 32
        model_size: 6684M 
        """
        self.low_resource = low_resource
        if not low_resource:
            # Full-precision path: float32 at inference, float16 for training.
            self.llama_model = AutoModelForCausalLM.from_pretrained(
                llm_path,
                torch_dtype=torch.float32 if is_inference else torch.float16,
                # torch_dtype=torch.float16,
                trust_remote_code=False
            )
        else:
            # 8-bit load sharded across available devices to save memory.
            self.llama_model = AutoModelForCausalLM.from_pretrained(
                llm_path,
                torch_dtype=torch.float16,
                load_in_8bit=True,
                device_map="auto",
                trust_remote_code=False
            )

        # Generation hyper-parameters, consumed by generate().
        self.max_length = llama_model_generate_max_length
        self.min_length = llama_model_generate_min_length
        self.num_beams = llama_model_generate_num_beams
        self.do_sample = llama_model_generate_do_sample
        self.top_p = llama_model_generate_top_p
        self.repetition_penalty = llama_model_generate_repetition_penalty
        self.length_penalty = llama_model_generate_length_penalty
        self.temperature = llama_model_generate_temperature
        self.load_epoch_ckpt = load_epoch_ckpt
        self.load_step_ckpt = load_step_ckpt
        self.load_eval_ckpt = load_eval_ckpt

        # lora
        self.lora = lora
        if lora:
            logging.info("耿雪龙： 使用lora了")
            target_modules = ['W_pack', 'o_proj', 'gate_proj', 'down_proj']
            if is_inference:
                self.peft_config = LoraConfig(
                    task_type=TaskType.CAUSAL_LM,
                    inference_mode=True,
                    r=lora_rank,
                    lora_alpha=lora_alpha,
                    lora_dropout=lora_dropout,
                    target_modules=target_modules,
                )
            else:
                self.peft_config = LoraConfig(
                    task_type=TaskType.CAUSAL_LM,
                    inference_mode=False,
                    r=lora_rank,
                    lora_alpha=lora_alpha,
                    lora_dropout=lora_dropout,
                    target_modules=target_modules,
                )
            self.llama_model = get_peft_model(self.llama_model, self.peft_config)

        # tokenizer
        # self.llama_tokenizer = LlamaTokenizer.from_pretrained(llm_path, use_fast=False)
        self.llama_tokenizer = AutoTokenizer.from_pretrained(
            llm_path, use_fast=False, trust_remote_code=True)
        # (translated) Set the tokenizer's pad_token and padding direction.
        """
        设置分词器的pad_token和padding的方向。
        """

        #self.llama_tokenizer.add_special_tokens({'pad_token': '[PAD]'})
        self.llama_tokenizer.padding_side = "right"
        self.ctc_weight = ctc_weight
        # CTC head over the 1280-d connector output. Vocab size / blank id are
        # hard-coded for a specific tokenizer — TODO confirm they match llm_path.
        # self.ctc = CTC(100352, 1280, blank_id=100270) # mini
        self.ctc = CTC(32064, 1280, blank_id=32060) # small
        # self.ctc = CTC(32064, self.llama_model.config.hidden_size, blank_id=32060)
        # self.ctc.ctc_lo.weight.data = self.llama_model.lm_head.weight.data
        # self.ctc.ctc_lo.bias.data.zero_()
        # self.ctc.float()

        # Coupling between the connector output and the LLM input space:
        # self.speech_llama_proj = nn.Linear(
        #     1280, self.llama_model.config.hidden_size)
        # self.speech_llama_proj1 = nn.Linear(
        #     self.llama_model.config.hidden_size, 2560)
        self.speech_llama_proj1 = nn.Linear(
            1280, 2560)
        self.speech_llama_proj2 = nn.Linear(
            2560, self.llama_model.config.hidden_size)

        self.prompt_pattern = prompt_pattern

        # Optional temporal downsampling applied after the CTC loss in forward().
        self.down_sample_2 = nn.Identity()
        if self.downsample_rate == 2:
            self.down_sample_2 = GxlConv1dSubsampling2(1280, 1280)
            # self.down_sample_2 = GxlConv1dSubsampling2(self.llama_model.config.hidden_size, self.llama_model.config.hidden_size)
        elif self.downsample_rate == 4:
            self.down_sample_2 = GxlConv1dSubsampling4(1280, 1280)
        elif self.downsample_rate == 8:
            self.down_sample_2 = GxlConv1dSubsampling8(1280, 1280)
        elif self.downsample_rate == 6:
            self.down_sample_2 = GxlConv1dSubsampling6(1280, 1280)
        # Counter used to rate-limit debug logging in forward()/generate().
        self.gxl_num = 100

        self.gelu = nn.GELU()

        # Auxiliary language-ID loss and classifier over the mls_dict languages.
        self.language_loss = nn.CrossEntropyLoss()
        self.language_class = nn.Linear(1280, len(mls_dict.keys()))

        # Second-encoder fusion branch: its own 1280-d adapter, connector
        # transformer, and per-language learnable fusion weights.
        if self.speech_encoder2 is not None:
            self.hubert_dim2whisper_dim2 = nn.Linear(encoder2.output_size(),
                                                1280) #if self.encoder2_type != "whisper" else nn.Identity()
            self.speech_transformer2 = TransformerEncoder(
                input_size=1280,
                output_size=1280,
                attention_heads=4,
                linear_units=2560,
                num_blocks=self.transformer_num_blocks,
                dropout_rate=0.1,
                positional_dropout_rate=0.1,
                attention_dropout_rate=0.0,
                input_layer="linear",
                pos_enc_layer_type="abs_pos",
                normalize_before=True
                )
            
            # self.mix_encoder_proj = nn.Linear(
            #     2560, 1280)
            
            # self.ctc2 = CTC(32064,1280,blank_id=32060)

            # self.gated_network = GatedNetwork(1280)
            # self.weight_network = WeightNetwork(len(mls_dict.keys()))
            # One learnable fusion weight per language (06/20 scheme in forward()).
            self.weight_network = WeightLearner(len(mls_dict.keys()))

    def forward(self,
                batch,
                device,
                ):
        """Training forward pass.

        Pipeline: speech encoder(s) -> 1280-d adapter -> connector
        transformer -> (optional two-encoder fusion with per-language
        weights) -> CTC loss -> optional Conv1d downsampling -> two-layer
        projection into the LLM space -> teacher-forced LLM loss on
        [bos, prompt_left, speech, prompt_right, labels, eos].

        Args:
            batch: dict with 'feats', 'feats_lengths', 'target',
                'target_lengths', and optional 'task_control_list' /
                'language_list'.
            device: target device for the batch tensors.

        Returns:
            dict with keys 'loss', 'loss_ctc', 'loss_decoder', 'loss_lid'.
        """
        # 32051 + mls_dict()
        task_control_list = batch.get('task_control_list', None)
        language_list = batch.get('language_list', None)
        if language_list is not None:
            # language_id = torch.tensor([mls_dict[language] + 32051 for language in language_list]).unsqueeze(1).to(device)
            language_id = torch.tensor([mls_dict[language] for language in language_list]).unsqueeze(1).to(device)
            # language_id = 0
        # NOTE(review): language_id is only bound when 'language_list' is present,
        # yet loss_lid below uses it unconditionally — confirm every batch has it.
        if task_control_list is not None:
            logging.info(f'耿雪龙:task_control_list {task_control_list}')
        wavs = batch['feats'].to(device)
        wavs_len = batch['feats_lengths'].to(device)
        labels = batch['target'].to(device)
        labels_len = batch['target_lengths'].to(device)
        prompt = self.prompt

        # (translated) First obtain the audio-encoder features.
        # speech_embeds: audio features destined for the LLM, already aligned
        # to the target feature dim. shape: (b, t, 4096)
        """
        首先 得到音频编码的特征
        speech_embeds ： 为输入LLM的音频编码特征， 已经对齐特征维度。 shape:(b, t, 4096)
        """
        speech_embeds, speech_lens = self.speech_encoder(wavs, wavs_len)

        speech_embeds = self.hubert_dim2whisper_dim(speech_embeds)
        B, T, C = speech_embeds.shape
        # Utterance-level embedding (mean over time) used for language ID.
        lid_embeds = torch.mean(speech_embeds, dim=1)
        ##speech_embeds = torch.cat([lid_embeds.unsqueeze(1), speech_embeds], dim=1)
        ##speech_lens = speech_lens + 1
        speech_embeds, speech_masks = self.speech_transformer(speech_embeds, speech_lens)

        speech_lens = speech_masks.sum(dim=2)
        # torch.cat([language_id, labels], dim=1)
        # if self.ctc_weight != 0:
        #     loss_ctc, ctc_probs = self.ctc(speech_embeds, speech_lens, labels, labels_len)
        # else:
        #     loss_ctc = torch.tensor([0])
        
        # language_embeds = speech_embeds[:, 0:1, :]  # B*1*N
        # speech_embeds = speech_embeds[:, 1:, :]  # B*(T-1)*N

        # Second-encoder branch: encode again, align time axes, then fuse.
        if self.speech_encoder2 is not None:
            #import pdb; pdb.set_trace()
            speech_embeds2, speech_lens2 = self.speech_encoder2(wavs, wavs_len)
            speech_embeds2 = self.hubert_dim2whisper_dim2(speech_embeds2)

            B, T, C = speech_embeds2.shape
            lid_embeds2 = torch.mean(speech_embeds2, dim=1)
            ##speech_embeds2 = torch.cat([lid_embeds2.unsqueeze(1), speech_embeds2], dim=1)
            ##speech_lens2 = speech_lens2 + 1
            speech_embeds2, speech_masks2 = self.speech_transformer2(speech_embeds2, speech_lens2)

            speech_lens2 = speech_masks2.sum(dim=2)
            # Align the two encoders' time axes by truncating the longer one.
            # NOTE(review): lengths are decremented by exactly 1 regardless of how
            # many frames were trimmed — confirm this is intentional.
            if speech_embeds.shape[1] > speech_embeds2.shape[1]:
                speech_embeds = speech_embeds[:, :speech_embeds2.shape[1], :]
                speech_lens -= 1
            elif speech_embeds.shape[1] < speech_embeds2.shape[1]:
                speech_embeds2 = speech_embeds2[:, :speech_embeds.shape[1], :]
                speech_lens2 -= 1
            # if self.ctc_weight != 0:
            #     loss_ctc2, ctc_probs2 = self.ctc2(speech_embeds2, speech_lens2, labels, labels_len)
            # else:
            #     loss_ctc2 = torch.tensor([0])

            # LID/CTC fusion
            # language_embeds2 = speech_embeds2[:, 0:1, :]  # B*1*N
            # speech_embeds2 = speech_embeds2[:, 1:, :]  # B*(T-1)*N
            # Gated addition
            lid_embeds = lid_embeds + lid_embeds2
            # gate = self.gated_network(lid_embeds.unsqueeze(1), speech_embeds, speech_embeds2)
            # speech_embeds = gate * speech_embeds + (1 - gate) * speech_embeds2
            # Plain addition
            # speech_embeds = speech_embeds + speech_embeds2
            # Compute weights from language embeddings 1 and 2
            lid_embeds = self.language_class(lid_embeds)

            # Pre-06/20 scheme: compute the two weights from speech_embeds
            # weight1, weight2 = self.weight_network(lid_embeds, speech_embeds, speech_embeds2)  # output shape (N, 1)

            # 06/20 scheme: predict the LID first, then select the stored weight by LID
            #import pdb; pdb.set_trace()
            language_probs = torch.softmax(lid_embeds, dim=-1)  # (N, num_languages)
            _, predicted_lid = torch.max(language_probs, dim=1)  # (N,)
            current_weights = self.weight_network() * 2  # (N,)
            # print(current_weights)
            # NOTE(review): this overrides the predicted LID and forces index 0 for
            # every utterance — looks like a debugging shortcut; confirm.
            predicted_lid[:] = 0
            selected_weights = current_weights[predicted_lid]
            # Complementary pair: weight1 + weight2 == 2.
            weight1 = 2 - selected_weights.unsqueeze(1)
            weight2 = selected_weights.unsqueeze(1)

            # # Expand weight to match speech_embedding's dimensions
            weight1 = weight1.unsqueeze(1)  # (B, 1, 1)
            weight1 = weight1.expand(B, speech_embeds.shape[1], speech_embeds.shape[2])  # (B, T, N)

            weight2 = weight2.unsqueeze(1)  # (B, 1, 1)
            weight2 = weight2.expand(B, speech_embeds.shape[1], speech_embeds.shape[2])  # (B, T, N)
            speech_embeds = weight1 * speech_embeds + weight2 * speech_embeds2



        # Auxiliary language-ID cross-entropy.
        loss_lid = self.language_loss(lid_embeds, language_id.squeeze(1))

        # speech_embeds = self.speech_llama_proj(speech_embeds)
        # torch.cat([language_id+ 32051, labels], dim=1),
        if self.ctc_weight != 0:
            loss_ctc, ctc_probs = self.ctc(speech_embeds, speech_lens, labels,
                                       labels_len)
        else:
            loss_ctc = torch.tensor([0])
        ## speech_embeds = speech_embeds[:, 1:, :]

        # After CTC, downsample the content representation again
        speech_embeds = self.down_sample_2(speech_embeds)

        # num_frames_to_discard = speech_embeds.size(1) % self.downsample_rate
        # x = speech_embeds
        # if num_frames_to_discard != 0 :
        #     x = x[:, :-num_frames_to_discard, :]
        # x = x.contiguous()
        # x = x.view(B, speech_embeds.size(1) // self.downsample_rate, C * self.downsample_rate)
        # speech_embeds = x

        # After CTC, convert the representation space via (possibly several) linears
        speech_embeds = self.speech_llama_proj1(speech_embeds)
        speech_embeds = self.gelu(speech_embeds)
        speech_embeds = self.speech_llama_proj2(speech_embeds)
        speech_embeds = self.ln_speech(speech_embeds)

        # language_embeds = self.language_llama_proj(language_embeds)
        # language_embeds = self.ln_speech(language_embeds)

        # (translated) Next handle the prompt: tokenize it into an id sequence of
        # shape (1, N), then embed it with the LLM's embedding layer to
        # (1, N, 4096). embed_tokens: nn.Embedding(65000, 4096).
        # add_special_tokens=False means no special tokens (CLS/SEP-style) are added.
        """
        接着处理prompt， 将其首先使用分词器编码成数字序列shape(1,N), 接着使用LLM的Embedding层对其进行编码shape(1,N, 4096)
        embed_tokens： nn.Embedding(65000, 4096). 
        知识补充：在模型的输入中，可以选择是否在序列的开头和结尾添加一些特殊的token，如CLS、SEP等，
        以适应模型的要求。add_special_tokens=False 表示不添加特殊token
        """
        embed_tokens = self.llama_model.model.model.embed_tokens if self.lora else self.llama_model.model.embed_tokens
        if task_control_list is not None:
            if self.gxl_num <  20:
                self.gxl_num += 1
                logging.info(f'耿雪龙: task_control_list: {task_control_list}')
            assert isinstance(prompt, list), "prompt must be a list when task_control_list is not None"
            prompt_id = task_control_list[0]
            assert set(task_control_list) == {prompt_id}, f"task_control_list must be same number,task_control_list: {task_control_list}"
        else:
            prompt_id = 2
            # assert isinstance(prompt, str), "prompt must be a string when task_control_list is None"
        if isinstance(self.prompt, list):
            prompt = self.prompt[prompt_id]

        if self.gxl_num < 10:
            logging.info(f'耿雪龙: prompt: {prompt}')
            self.gxl_num += 1
        # Split the prompt template around the speech placeholder.
        prompt_left, prompt_right = self.prompt_pattern.format(prompt).split(
            '<SpeechHere>')
        if self.gxl_num < 3:
            logging.info(f'耿雪龙: prompt_left: {prompt_left}, prompt_right: {prompt_right}')
            self.gxl_num = 3
        # prompt_left: USER: <Speech>
        # prompt_middle: The speech language is
        # prompt_right: </Speech> Describe the speech.\nASSISTANT:
        prompt_left_ids = self.llama_tokenizer(  # shape: [1, 7]
            prompt_left,
            return_tensors="pt",
            add_special_tokens=False
        ).to(speech_embeds.device).input_ids
        # tensor([[ 3148,  1001, 29901,   529, 10649,  5309, 29958]], device='cuda:0')
        prompt_left_embeds = embed_tokens(prompt_left_ids).repeat_interleave(B, dim=0)  # torch.Size([17, 7, 4096])
        prompt_left_ids = prompt_left_ids.repeat_interleave(B, dim=0)  # torch.Size([17, 7])

        prompt_right_ids = self.llama_tokenizer(
            prompt_right,
            return_tensors="pt",
            add_special_tokens=False
        ).to(speech_embeds.device).input_ids
        prompt_right_embeds = embed_tokens(prompt_right_ids).repeat_interleave(B, dim=0)  # torch.Size([17, 14, 4096])
        prompt_right_ids = prompt_right_ids.repeat_interleave(B, dim=0)

        # prompt_middle_ids = self.llama_tokenizer(
        #     prompt_middle,
        #     return_tensors="pt",
        #     add_special_tokens=False
        # ).to(speech_embeds.device).input_ids
        # prompt_middle_embeds = embed_tokens(prompt_middle_ids).repeat_interleave(B, dim=0)  # torch.Size([17, 14, 4096])
        # prompt_middle_ids = prompt_middle_ids.repeat_interleave(B, dim=0)

        # (translated) Process labels (already padded, shape (B, T)): apply the
        # sos/eos helper to get padded labels_in (no bos added) and labels_out,
        # embed labels_in, then build bos/eos ids (B, 1) and embeddings (B, 1, 4096).
        """
        处理labels, labels本本身是已经padding过的，shape:(B , T)
        首先对其经过sos_eos处理， 得到两个padded_labels_in和padded_labels_out,
        labels_in不加入bos 
        然后使用Embedding层对padded_labels_in进行编码
        接着得到bos_ids和bos_embeds, eos_ids和eos_embeds.shape: (B,1),  (B,1, 4096)
        """
        labels_ids = labels  # torch.Size([17, 13])
        labels_in, labels_out = add_sos_eos4speech_llm(labels_ids, self.llama_tokenizer.bos_token_id,
                                                       self.llama_tokenizer.eos_token_id, ignore_id=-100)
        labels_in_embeds = embed_tokens(labels_in)  # torch.Size([17, 13, 4096])
        bos_ids = torch.ones([B, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 1
                             device=speech_embeds.device) * self.llama_tokenizer.bos_token_id
        bos_embeds = embed_tokens(bos_ids)  # torch.Size([17, 1, 4096])

        eos_ids = torch.ones([B, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 2
                             device=speech_embeds.device) * self.llama_tokenizer.eos_token_id
        eos_embeds = embed_tokens(eos_ids)

        # (translated) Concatenate [left prompt | audio | right prompt | labels_in]
        # embeddings as the LLM input, and the corresponding ids with labels_out
        # as the ground truth (prompt/audio positions masked to -100).
        """
        将左prompt 音频 右prompt label_in 的高纬特征拼接在一起。
        将左prompt 音频 右prompt label_out 的id拼接在一起作为ground truth
        """
        speech_embeds_B, speech_embeds_T = speech_embeds.size(0), speech_embeds.size(1)
        speech_ids = torch.ones([speech_embeds_B, speech_embeds_T], dtype=torch.long, device=speech_embeds.device)
        concat_ids = torch.cat([bos_ids, prompt_left_ids, speech_ids, prompt_right_ids], dim=1)
        filled_ids = concat_ids.fill_(-100)  # In CrossEntropyLoss(), ignore_index = -100
        embeds = torch.cat(
            [bos_embeds, prompt_left_embeds, speech_embeds, prompt_right_embeds,
             labels_in_embeds, eos_embeds], dim=1)
        labels = torch.cat([filled_ids, labels_out], dim=1)

        if self.low_resource:
            # NOTE(review): casting activations through int8 and back to float16 is
            # lossy — confirm this round-trip is intentional.
            embeds = embeds.to(torch.int8).to(torch.float16)
        outputs = self.llama_model(
            inputs_embeds=embeds,
            labels=labels,
        )
        loss_decoder = outputs['loss']  # 0-dim tensor (plain scalar)
        # if self.ctc_weight != 0 and self.speech_encoder2 is not None:
        #     loss_ctc = (loss_ctc + loss_ctc2) / 2
        if self.ctc_weight != 0:
            # Weighted sum of CTC and decoder losses plus a small fixed LID term.
            loss = self.ctc_weight * loss_ctc + (1-self.ctc_weight) * loss_decoder + 0.05 * loss_lid
        else:
            loss = loss_decoder
        return {"loss": loss, "loss_ctc":loss_ctc, "loss_decoder":loss_decoder, "loss_lid":loss_lid}

    def generate(
            self,
            wavs,
            wavs_len,
            language_id,
            prompt,
    ):
        # import pdb; pdb.set_trace()
        prompt = self.prompt
        logging.info(prompt)
        if self.gxl_num< 10:
            logging.info("prompt: {}".format(prompt))
            self.gxl_num += 1
        speech_embeds, speech_lens = self.speech_encoder(wavs, wavs_len)
        speech_embeds = self.hubert_dim2whisper_dim(speech_embeds)

        B, T, C = speech_embeds.shape
        lid_embeds = torch.mean(speech_embeds, dim=1)
        # speech_embeds = torch.cat([lid_embeds.unsqueeze(1), speech_embeds], dim=1)
        # speech_lens = speech_lens + 1
        speech_embeds, speech_masks = self.speech_transformer(speech_embeds, speech_lens)
        # speech_lens = speech_masks.sum(dim=2)
        # ctc_probs = self.ctc.log_softmax(speech_embeds)
        # ctc_results = ctc_greedy_search(ctc_probs, speech_lens, 32060)
        predict_id = 0 #ctc_results[0].tokens[0]
        # outputs = torch.tensor(ctc_results[0].tokens).unsqueeze(0)
        # output_text = self.llama_tokenizer.batch_decode(outputs, add_special_tokens=False, skip_special_tokens=True)
        # output_text = ' '.join(output_text)
        # output_text = [output_text, 'test']
        # if int(language_id) == predict_id:
        #     return output_text, 1
        # else:
        #     return output_text, 0
        
        # CTC后，可能需要多个linear进行表征空间转换
        # language_embeds = speech_embeds[:, 0:1, :]  # B*1*N
        # speech_embeds = speech_embeds[:, 1:, :]  # B*(T-1)*N

        if self.speech_encoder2 is not None:
            speech_embeds2, speech_lens2 = self.speech_encoder2(wavs, wavs_len)
            speech_embeds2 = self.hubert_dim2whisper_dim2(speech_embeds2)
            B, T, C = speech_embeds2.shape
            lid_embeds2 = torch.mean(speech_embeds2, dim=1)
            # speech_embeds2 = torch.cat([lid_embeds2.unsqueeze(1), speech_embeds2], dim=1)
            # speech_lens2 = speech_lens2 + 1
            speech_embeds2, speech_masks2 = self.speech_transformer2(speech_embeds2, speech_lens2)
            # speech_lens2 = speech_masks2.sum(dim=2)
            # ctc_probs2 = self.ctc.log_softmax(speech_embeds2)
            # ctc_results2 = ctc_greedy_search(ctc_probs2, speech_lens2, 32060)
            # outputs = torch.tensor(ctc_results2[0].tokens).unsqueeze(0)
            # output_text = self.llama_tokenizer.batch_decode(outputs, add_special_tokens=False, skip_special_tokens=True)
            # output_text = ' '.join(output_text)
            # output_text = [output_text, 'test']
            # if int(language_id) == predict_id:
            #     return output_text, 1
            # else:
            #     return output_text, 0


            # speech_embeds2 = speech_embeds2[:, 1:, :]  # B*(T-1)*N
            # speech_embeds = torch.cat([speech_embeds, speech_embeds2], dim=2)
            # speech_embeds = self.mix_encoder_proj(speech_embeds)

            if speech_embeds.shape[1] > speech_embeds2.shape[1]:
                speech_embeds = speech_embeds[:, :speech_embeds2.shape[1], :]
                speech_lens -= 1
            elif speech_embeds.shape[1] < speech_embeds2.shape[1]:
                speech_embeds2 = speech_embeds2[:, :speech_embeds.shape[1], :]
                speech_lens2 -= 1
            # speech_embeds = speech_embeds + speech_embeds2
            lid_embeds = lid_embeds + lid_embeds2
            # gate = self.gated_network(lid_embeds.unsqueeze(1), speech_embeds, speech_embeds2)

            # speech_embeds = gate * speech_embeds + (1 - gate) * speech_embeds2

             # 普通相加
            # speech_embeds = speech_embeds + speech_embeds2
            # language_embedding1，2计算权重
            lid_embeds = self.language_class(lid_embeds)
            # weight1, weight2 = self.weight_network(lid_embeds, speech_embeds, speech_embeds2)  # 输出维度为 (N, 1)

            #06/20方案，先预测lid，根据lid选择存储好的权重
            # import pdb; pdb.set_trace()
            language_probs = torch.softmax(lid_embeds, dim=-1)  # (N, num_languages)
            _, predicted_lid = torch.max(language_probs, dim=1)  # (N,)
            current_weights = self.weight_network() * 2  # (N,)
            # print(current_weights)
            selected_weights = current_weights[predicted_lid]
            weight1 = 2 - selected_weights.unsqueeze(1)
            weight2 = selected_weights.unsqueeze(1)
            logging.info(f'whisper and mms:{weight1} {weight2}. current_weights: {current_weights}')
            # 扩展 weight 以匹配 speech_embedding 的维度
            weight1 = weight1.unsqueeze(1)  # (B, 1, 1)
            weight1 = weight1.expand(B, speech_embeds.shape[1], speech_embeds.shape[2])  # (B, T, N)

            weight2 = weight2.unsqueeze(1)  # (B, 1, 1)
            weight2 = weight2.expand(B, speech_embeds.shape[1], speech_embeds.shape[2])  # (B, T, N)
            speech_embeds = weight1 * speech_embeds + weight2 * speech_embeds2
            

        # speech_embeds = speech_embeds[:, 1:, :]
        # CTC后，内容相关表征再下采样
        speech_embeds = self.down_sample_2(speech_embeds)
        # CTC后，可能需要多个linear进行表征空间转换
        speech_embeds = self.speech_llama_proj1(speech_embeds)
        speech_embeds = self.gelu(speech_embeds)
        speech_embeds = self.speech_llama_proj2(speech_embeds)
        speech_embeds = self.ln_speech(speech_embeds)

        # language_embeds = self.ln_speech(language_embeds)

        # USER: <Speech>speech_embeds<Speech> prompt\nASSISTANT:
        embed_tokens = self.llama_model.model.model.embed_tokens if self.lora else self.llama_model.model.embed_tokens
        prompt_left, prompt_right = self.prompt_pattern.format(prompt).split(
            '<SpeechHere>')  # prompt_left: 'USER: <Speech>', prompt_right: '</Speech> Describe the speech.\nASSISTANT:'
        prompt_left_ids = self.llama_tokenizer(
            prompt_left,
            return_tensors="pt",
            add_special_tokens=False
        ).to(
            speech_embeds.device).input_ids  # tensor([[ 3148,  1001, 29901,   529, 10649,  5309, 29958]], device='cuda:0')
        prompt_left_embeds = embed_tokens(prompt_left_ids)  # torch.Size([1, 7, 4096])
        prompt_right_ids = self.llama_tokenizer(
            prompt_right,
            return_tensors="pt",
            add_special_tokens=False
        ).to(
            speech_embeds.device).input_ids  # tensor([[ 1533, 10649,  5309, 29958, 20355,   915,   278, 12032, 29889,    13, 22933,  9047, 13566, 29901]], device='cuda:0')
        prompt_right_embeds = embed_tokens(prompt_right_ids)  # torch.Size([1, 14, 4096])

        # prompt_middle_ids = self.llama_tokenizer(
        #     prompt_middle,
        #     return_tensors="pt",
        #     add_special_tokens=False
        # ).to(speech_embeds.device).input_ids
        # prompt_middle_embeds = embed_tokens(prompt_middle_ids).repeat_interleave(B, dim=0)  # torch.Size([17, 14, 4096])

        bos_embeds = self.llama_model.model.embed_tokens(
            torch.ones(
                [B, 1],
                dtype=torch.long,
                device=speech_embeds.device,
            ) * self.llama_tokenizer.bos_token_id
        ) if not self.lora else self.llama_model.model.model.embed_tokens(
            torch.ones(
                [B, 1],
                dtype=torch.long,
                device=speech_embeds.device,
            ) * self.llama_tokenizer.bos_token_id
        )  # torch.Size([1, 14, 4096])
        embeds = torch.cat([bos_embeds, prompt_left_embeds, speech_embeds, prompt_right_embeds], dim=1)
        # embeds = torch.cat([prompt_left_embeds, speech_embeds, prompt_right_embeds], dim=1)
        atts = torch.ones(embeds.size()[:-1], dtype=torch.long).to(embeds.device)
        # import pdb;pdb.set_trace()
        # generate
        # peft/peft_model.py(726)generate()
        # embeds = embeds.to(torch.float16)
        embeds = embeds.half()
        atts = atts.half()
        outputs = self.llama_model.generate(
            inputs_embeds=embeds,
            # max_length=self.max_length,
            max_new_tokens=self.max_length,
            num_beams=self.num_beams,
            do_sample=self.do_sample,
            min_length=self.min_length,
            top_p=self.top_p,
            repetition_penalty=self.repetition_penalty,
            length_penalty=self.length_penalty,
            temperature=self.temperature,
            attention_mask=atts,
            bos_token_id=self.llama_tokenizer.bos_token_id,
            eos_token_id=self.llama_tokenizer.eos_token_id,
            pad_token_id=self.llama_tokenizer.pad_token_id,
        )
        output_text = self.llama_tokenizer.batch_decode(outputs, add_special_tokens=False, skip_special_tokens=True)

        if int(language_id) == predict_id:
            return output_text, 1
        else:
            return output_text, 0

    def generate4prompt(self, prompt):
        """Run text-only generation on the LLaMA model for a plain prompt.

        The prompt is tokenized without special tokens; a BOS embedding is
        prepended manually so every input token goes through the same
        embedding table before ``generate`` is called with ``inputs_embeds``.

        Args:
            prompt (str): Raw prompt text.

        Returns:
            list[str]: Decoded generations, one string per returned sequence,
                with special tokens stripped.
        """
        device = self.llama_model.device
        prompt_ids = self.llama_tokenizer(
            prompt,
            return_tensors="pt",
            add_special_tokens=False
        ).input_ids.to(device)
        # With LoRA the base model is wrapped one level deeper
        # (llama_model.model.model); without it, one level (llama_model.model).
        embed_tokens = self.llama_model.model.model.embed_tokens if self.lora else self.llama_model.model.embed_tokens
        prompt_embeds = embed_tokens(prompt_ids)  # (1, prompt_len, hidden)
        # Reuse the embedding table bound above for the BOS token instead of
        # repeating the LoRA-conditional attribute chain.
        bos_ids = torch.ones(
            [1, 1],
            dtype=torch.long,
            device=device,
        ) * self.llama_tokenizer.bos_token_id
        bos_embeds = embed_tokens(bos_ids)  # (1, 1, hidden)
        embeds = torch.cat([bos_embeds, prompt_embeds], dim=1)
        atts = torch.ones(embeds.size()[:-1], dtype=torch.long).to(embeds.device)
        # NOTE(review): this path passes max_length while the speech path uses
        # max_new_tokens — confirm the asymmetry is intentional.
        outputs = self.llama_model.generate(
            inputs_embeds=embeds,
            max_length=self.max_length,
            num_beams=self.num_beams,
            do_sample=self.do_sample,
            min_length=self.min_length,
            top_p=self.top_p,
            repetition_penalty=self.repetition_penalty,
            length_penalty=self.length_penalty,
            temperature=self.temperature,
            attention_mask=atts,
            bos_token_id=self.llama_tokenizer.bos_token_id,
            eos_token_id=self.llama_tokenizer.eos_token_id,
            pad_token_id=self.llama_tokenizer.pad_token_id,
        )
        output_text = self.llama_tokenizer.batch_decode(
            outputs, add_special_tokens=False, skip_special_tokens=True)
        return output_text
