import logging
import os
import sys

import torchaudio

sys.path.append('../../')
import torch
from peft import LoraConfig, TaskType, get_peft_model
from torch import nn
from transformers import AutoModelForCausalLM, AutoTokenizer

from wenet.transformer.encoder import TransformerEncoder
from wenet.utils.common import add_sos_eos, add_sos_eos4speech_llm
# from wenet.utils.gxl_utils import Whisper_Utils
from gxl_ai_utils.utils import utils_file
import soundfile as sf
from wenet.salmonn.gxl_utils import global_sampler, get_10_random_wenetspeech_pair


class GxlConv1dSubsampling2(nn.Module):
    """Conv1d temporal subsampling, roughly 2x.

    Two Conv1d layers (kernel 3, strides 1 then 2), each followed by GELU,
    applied along the time axis.

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
    """

    def __init__(self, idim: int, odim: int):
        """Build the two-layer Conv1d stack."""
        super().__init__()
        layers = []
        in_ch = idim
        for stride in (1, 2):
            layers.append(torch.nn.Conv1d(in_ch, odim, 3, stride))
            layers.append(torch.nn.GELU())
            in_ch = odim
        self.conv = torch.nn.Sequential(*layers)

    def forward(self, x):
        """Subsample along time.

        Args:
            x: (B, T, idim)

        Returns:
            (B, T', odim) with T' roughly T / 2.
        """
        out = self.conv(x.transpose(1, 2))
        return out.transpose(1, 2)


class GxlConv1dSubsampling4(nn.Module):
    """Conv1d temporal subsampling, roughly 4x.

    Three Conv1d layers (kernel 3, strides 1, 2, 2), each followed by GELU,
    applied along the time axis.

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
    """

    def __init__(self, idim: int, odim: int):
        """Build the three-layer Conv1d stack."""
        super().__init__()
        layers = []
        in_ch = idim
        for stride in (1, 2, 2):
            layers.append(torch.nn.Conv1d(in_ch, odim, 3, stride))
            layers.append(torch.nn.GELU())
            in_ch = odim
        self.conv = torch.nn.Sequential(*layers)

    def forward(self, x):
        """Subsample along time.

        Args:
            x: (B, T, idim)

        Returns:
            (B, T', odim) with T' roughly T / 4.
        """
        out = self.conv(x.transpose(1, 2))
        return out.transpose(1, 2)


class GxlConv1dSubsampling6(nn.Module):
    """Conv1d temporal subsampling, roughly 6x.

    Three Conv1d layers (kernel 3, strides 1, 2, 3), each followed by GELU,
    applied along the time axis.

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
    """

    def __init__(self, idim: int, odim: int):
        """Build the three-layer Conv1d stack."""
        super().__init__()
        layers = []
        in_ch = idim
        for stride in (1, 2, 3):
            layers.append(torch.nn.Conv1d(in_ch, odim, 3, stride))
            layers.append(torch.nn.GELU())
            in_ch = odim
        self.conv = torch.nn.Sequential(*layers)

    def forward(self, x):
        """Subsample along time.

        Args:
            x: (B, T, idim)

        Returns:
            (B, T', odim) with T' roughly T / 6.
        """
        out = self.conv(x.transpose(1, 2))
        return out.transpose(1, 2)


class GxlConv1dSubsampling8(nn.Module):
    """Conv1d temporal subsampling, roughly 8x.

    Four Conv1d layers (kernel 3, strides 1, 2, 2, 2), each followed by
    GELU, giving an overall time subsampling factor of 8 — consistent with
    the class name and with the 2x/4x/6x sibling classes.

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
    """

    def __init__(self, idim: int, odim: int):
        """Construct the Conv1d stack."""
        super().__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv1d(idim, odim, 3, 1),
            torch.nn.GELU(),
            torch.nn.Conv1d(odim, odim, 3, 2),
            torch.nn.GELU(),
            torch.nn.Conv1d(odim, odim, 3, 2),
            torch.nn.GELU(),
            # BUG FIX: this conv previously used stride 8, giving ~32x total
            # subsampling instead of the 8x the class name promises
            # (1*2*2*2 = 8). Weight shapes are unaffected by stride, so any
            # existing checkpoint still loads.
            torch.nn.Conv1d(odim, odim, 3, 2),
            torch.nn.GELU(),
        )

    def forward(self, x):
        """Subsample along time.

        Args:
            x: (B, T, idim)

        Returns:
            (B, T', odim) with T' roughly T / 8.
        """
        x = x.transpose(1, 2)
        x = self.conv(x)
        x = x.transpose(1, 2)
        return x


def gxl_cat_case_by_case(input_tensors_list, control_list):
    """Gather tensors from *input_tensors_list* by the indices in
    *control_list* (repeats allowed) and concatenate them along dim 0."""
    picked = []
    for idx in control_list:
        picked.append(input_tensors_list[idx])
    return torch.cat(picked, dim=0)


class Salmonn_Model(nn.Module):
    def __init__(self, encoder, llm_path, speech_qformer_token_num=1, speech_qformer_layer=2, lora=True, lora_alpha=32,
                 lora_rank=8, lora_dropout=0.1, second_per_frame=0.333333, second_stride=0.333333, low_resource=False,
                 # prompt=None,  # no fresh prompt value is ever passed in here
                 prompt_pattern="{}：<Speech><SpeechHere></Speech>",
                 # "USER: <Speech><SpeechHere></Speech> {}\nASSISTANT:"
                 llama_model_generate_max_length=200, llama_model_generate_min_length=1,
                 llama_model_generate_num_beams=4, llama_model_generate_do_sample=True, llama_model_generate_top_p=0.9,
                 llama_model_generate_repetition_penalty=1.0, llama_model_generate_length_penalty=1.0,
                 llama_model_generate_temperature=1.0, load_epoch_ckpt=False, load_step_ckpt=False,
                 load_eval_ckpt=False, ckpt_path="", is_inference=False, downsample_rate=1, *args, **kwargs):
        """Build the SALMONN-style speech-to-text LLM wrapper.

        Wires together: a speech encoder, an optional hubert->whisper dim
        projection (1280), an optional Conv1d temporal downsampler, a
        4-block Transformer adapter, a linear projection into the LLM
        hidden size, and a causal LLM (optionally LoRA-wrapped / 8-bit).

        Args:
            encoder: speech encoder module; must expose ``output_size()``.
            llm_path: HuggingFace path of the causal LLM; also used for the
                tokenizer.
            lora: wrap the LLM with LoRA adapters when True.
            low_resource: load the LLM in 8-bit with ``device_map="auto"``.
            prompt_pattern: template containing ``<SpeechHere>`` where the
                speech embeddings get spliced in.
            is_inference: configure LoRA in inference mode when True.
            downsample_rate: extra temporal subsampling factor (2/4/6/8);
                any other value leaves an identity.
            kwargs: ``encoder_type`` selects "whisper" (default) or "hubert".

        NOTE(review): several parameters (speech_qformer_*, second_per_frame,
        second_stride, ckpt_path, load_*_ckpt are stored but unused here) do
        not affect this constructor's visible behavior.
        """
        super().__init__()
        self.downsample_rate = downsample_rate
        # NOTE(review): the second assignment immediately overwrites the
        # first — the first prompt string is dead.
        self.prompt = "转录如下音频。"
        self.prompt = '这是转录音频任务的示例，<SAMPLE> 请转录如下音频.'

        self.speech_encoder = encoder

        # checkpoint = torch.load('/home/work_nfs8/xlgeng/new_workspace/checkpoint/fairseq/checkpoint_4_100000.pt',
        #                         map_location="cpu")
        # old_checkpoint = checkpoint['model']
        # new_checkpoint = {}
        # for key in old_checkpoint.keys():
        #     if key.startswith('w2v_encoder.w2v_model.'):
        #         new_key = key[len('w2v_encoder.w2v_model.'):]
        #         new_checkpoint[new_key] = old_checkpoint[key]
        #     elif key.startswith('w2v_encoder.proj.'):
        #         new_key = key.replace('w2v_encoder.proj.', 'final_proj.')
        #         # new_checkpoint[new_key] = old_checkpoint[key]
        #     else:
        #         new_checkpoint[key] = old_checkpoint[key]
        #
        # self.speech_encoder.upstream.upstream.model.load_state_dict(new_checkpoint, strict=False)

        # self.speech_encoder.eval()
        """
        hubert的dim是1024， whisper的dim的1280， 通过线性层转换
        """
        # hubert features are 1024-dim while whisper's are 1280-dim; bridge
        # with a linear layer only for the hubert encoder type.
        self.encoder_type = kwargs.get("encoder_type", "whisper")
        logging.info(f'耿雪龙： encoder_type: {self.encoder_type}')
        self.hubert_dim2whisper_dim = nn.Linear(encoder.output_size(),
                                                1280) if self.encoder_type == "hubert" else nn.Identity()
        # NOTE(review): ln_speech appears unused in the visible methods.
        self.ln_speech = nn.LayerNorm(1280)

        # connector/adapter layer between speech encoder and LLM (~51.6M params)
        self.speech_transformer = TransformerEncoder(
            input_size=1280,
            output_size=1280,
            attention_heads=4,
            linear_units=2560,
            num_blocks=4,
            dropout_rate=0.1,
            positional_dropout_rate=0.1,
            attention_dropout_rate=0.0,
            input_layer="linear",
            pos_enc_layer_type="abs_pos",
            normalize_before=True
        )

        # LLM,
        # Atom-7B, ~6,684M params, llama-7b family
        """
        atom-7b 模型介绍：
        特征维度： 4096，
        词数： 65000
        encoder_layers_num: 32
        model_size: 6684M 
        """
        self.low_resource = low_resource
        if not low_resource:
            self.llama_model = AutoModelForCausalLM.from_pretrained(
                llm_path,
                # torch_dtype=torch.float32 if is_inference else torch.float16,
                torch_dtype=torch.float16,
                trust_remote_code=True
            )
        else:
            # 8-bit loading to fit the LLM on constrained hardware.
            self.llama_model = AutoModelForCausalLM.from_pretrained(
                llm_path,
                torch_dtype=torch.float16,
                load_in_8bit=True,
                device_map="auto",
                trust_remote_code=True
            )

        # Generation hyper-parameters, forwarded to llama_model.generate().
        self.max_length = llama_model_generate_max_length
        self.min_length = llama_model_generate_min_length
        self.num_beams = llama_model_generate_num_beams
        self.do_sample = llama_model_generate_do_sample
        self.top_p = llama_model_generate_top_p
        self.repetition_penalty = llama_model_generate_repetition_penalty
        self.length_penalty = llama_model_generate_length_penalty
        self.temperature = llama_model_generate_temperature
        self.load_epoch_ckpt = load_epoch_ckpt
        self.load_step_ckpt = load_step_ckpt
        self.load_eval_ckpt = load_eval_ckpt

        # lora
        self.lora = lora
        if lora:
            logging.info("耿雪龙： 使用lora了")
            target_modules = ['W_pack', 'o_proj', 'gate_proj', 'down_proj']
            # The two branches differ only in inference_mode.
            if is_inference:
                self.peft_config = LoraConfig(
                    task_type=TaskType.CAUSAL_LM,
                    inference_mode=True,
                    r=lora_rank,
                    lora_alpha=lora_alpha,
                    lora_dropout=lora_dropout,
                    target_modules=target_modules,
                )
            else:
                self.peft_config = LoraConfig(
                    task_type=TaskType.CAUSAL_LM,
                    inference_mode=False,
                    r=lora_rank,
                    lora_alpha=lora_alpha,
                    lora_dropout=lora_dropout,
                    target_modules=target_modules,
                )
            self.llama_model = get_peft_model(self.llama_model, self.peft_config)

        # tokenizer
        # self.llama_tokenizer = LlamaTokenizer.from_pretrained(llm_path, use_fast=False)
        self.llama_tokenizer = AutoTokenizer.from_pretrained(
            llm_path, use_fast=False, trust_remote_code=True)
        """
        设置分词器的pad_token和padding的方向。
        """
        # Register a pad token and pad on the right (labels are right-aligned).
        self.llama_tokenizer.add_special_tokens({'pad_token': '[PAD]'})
        self.llama_tokenizer.padding_side = "right"

        # Coupling point between the adapter (1280) and the LLM hidden size.
        self.speech_llama_proj = nn.Linear(
            1280, self.llama_model.config.hidden_size)

        self.prompt_pattern = prompt_pattern

        # Optional extra temporal subsampling on top of the speech encoder.
        self.down_sample_2 = nn.Identity()
        if self.downsample_rate == 2:
            self.down_sample_2 = GxlConv1dSubsampling2(1280, 1280)
        elif self.downsample_rate == 4:
            self.down_sample_2 = GxlConv1dSubsampling4(1280, 1280)
        elif self.downsample_rate == 8:
            self.down_sample_2 = GxlConv1dSubsampling8(1280, 1280)
        elif self.downsample_rate == 6:
            self.down_sample_2 = GxlConv1dSubsampling6(1280, 1280)
        # Counter used to rate-limit some logging in forward/generate paths.
        self.gxl_num = 0
        # PEFT wraps the model one level deeper, hence the extra ".model".
        self.embed_tokens = self.llama_model.model.model.embed_tokens if self.lora else self.llama_model.model.embed_tokens

    def forward_ICL(self,
                batch,
                device,
                ):
        """In-context-learning training forward pass.

        Assembles the LLM input as
        [bos, user, (left-prompt text pieces interleaved with example-audio
        embeddings), target speech embeddings, right prompt, assistant,
        target-text embeddings, eos], masks every pre-target position with
        -100, and returns the LM loss.

        Args:
            batch: dict with 'feats', 'feats_lengths', 'target',
                'target_lengths' and optionally 'task_control_list'.
            device: device the batch tensors are moved to.

        Returns:
            dict with a single 'loss' entry (scalar tensor).
        """
        utils_file.logging_limit_print('耿雪龙：进入salmonn.py forward')
        task_control_list = batch.get('task_control_list', None)
        if task_control_list is not None:
            logging.info(f'耿雪龙:task_control_list {task_control_list}')
        utils_file.logging_limit_print('看一下输入情况')
        wavs = batch['feats'].to(device)
        utils_file.logging_limit_print('耿雪龙：输入的音频特征shape:{}'.format(wavs.shape))
        wavs_len = batch['feats_lengths'].to(device)
        utils_file.logging_limit_print('耿雪龙：输入的音频长度shape:{}'.format(wavs_len.shape))
        labels = batch['target'].to(device)
        utils_file.logging_limit_print('耿雪龙：输入的target shape:{}'.format(labels.shape))
        labels_len = batch['target_lengths'].to(device)
        utils_file.logging_limit_print('耿雪龙：输入的target长度shape:{}'.format(labels_len.shape))
        prompt = self.prompt
        utils_file.logging_limit_print('耿雪龙：使用的prompt:{}'.format(prompt))

        """
        首先 得到音频编码的特征
        speech_embeds ： 为输入LLM的音频编码特征， 已经对齐特征维度。 shape:(b, t, 4096)
        """
        utils_file.logging_limit_print('开始得到要转录的音频特征')
        speech_embeds = self.get_embedding_from_wav(wavs, wavs_len)


        """
        接着处理prompt， 将其首先使用分词器编码成数字序列shape(1,N), 接着使用LLM的Embedding层对其进行编码shape(1,N, 4096)
        embed_tokens： nn.Embedding(65000, 4096). 
        知识补充：在模型的输入中，可以选择是否在序列的开头和结尾添加一些特殊的token，如CLS、SEP等，
        以适应模型的要求。add_special_tokens=False 表示不添加特殊token
        """
        # Build the in-context example text: only the label text is
        # substituted here; each <SampleHere> slot stays in the string and
        # is replaced by example-audio embeddings further below.
        res_sample_prompt = ""
        one_sample_prompt_pattern = "示例{}：<SampleHere>,对应转录文本为：<LabelHere>;"
        prompt_wav_path_list, prompt_wav_label_list = get_10_random_wenetspeech_pair()
        # one_sample_prompt_pattern = "示例{}对应转录文本为：<LabelHere>;"
        for i, items in enumerate(zip(prompt_wav_path_list, prompt_wav_label_list)):
            wav_path, wav_label = items
            utils_file.logging_limit_print('generate_ICL(): wav_path:', wav_path)
            utils_file.logging_limit_print('generate_ICL(): wav_label:', wav_label)
            # one_sample_prompt = one_sample_prompt.replace('<SampleHere>', wav_path)
            one_sample_prompt = one_sample_prompt_pattern.replace('<LabelHere>', wav_label)
            one_sample_prompt = one_sample_prompt.replace('{}', str(i))
            res_sample_prompt += one_sample_prompt
        utils_file.logging_limit_print('generate_ICL(): res_sample_prompt:', res_sample_prompt)
        prompt = prompt.replace('<SAMPLE>', res_sample_prompt)
        utils_file.logging_limit_print('generate_ICL(): prompt:', prompt)

        if task_control_list is not None:
            if self.gxl_num < 20:
                self.gxl_num += 1
                logging.info(f'耿雪龙: task_control_list: {task_control_list}')
            # NOTE(review): `prompt` is a plain str at this point (built just
            # above), so this assert fires whenever task_control_list is
            # provided — confirm whether this branch is ever taken.
            assert isinstance(prompt, list), "prompt must be a list when task_control_list is not None"
            prompt_id = task_control_list[0]
            assert set(task_control_list) == {
                prompt_id}, f"task_control_list must be same number,task_control_list: {task_control_list}"
        else:
            prompt_id = 2  # NOTE(review): prompt_id is never used below
        #     # assert isinstance(prompt, str), "prompt must be a string when task_control_list is None"
        # if isinstance(self.prompt, list):
        #     prompt = self.prompt[prompt_id]
        # else:
        #     prompt = "转录如下音频."

        prompt_left, prompts_right = self.prompt_pattern.format(prompt).split(
            '<SpeechHere>')
        utils_file.logging_limit_print(f'耿雪龙: prompt_left: {prompt_left}, prompts_right: {prompts_right}')

        # prompt_left: USER: <Speech>
        # prompts_right: </Speech> Describe the speech.\nASSISTANT:

        # prompt_left_ids = self.llama_tokenizer(  # shape: [1, 7]
        #     prompt_left,
        #     return_tensors="pt",
        #     add_special_tokens=False
        # ).to(speech_embeds.device).input_ids
        # tensor([[ 3148,  1001, 29901,   529, 10649,  5309, 29958]], device='cuda:0')
        # prompt_left_embeds = embed_tokens(prompt_left_ids).repeat_interleave(B, dim=0)  # torch.Size([17, 7, 4096])
        # prompt_left_ids = prompt_left_ids.repeat_interleave(B, dim=0)  # torch.Size([17, 7])
        B = speech_embeds.size(0)
        prompt_right_ids = self.llama_tokenizer(
            prompts_right,
            return_tensors="pt",
            add_special_tokens=False
        ).to(speech_embeds.device).input_ids
        prompt_right_embeds = self.embed_tokens(prompt_right_ids).repeat_interleave(B, dim=0)  # torch.Size([17, 14, 4096])
        prompt_right_ids = prompt_right_ids.repeat_interleave(B, dim=0)

        # prompt-> :  USER: <Speech>speech_embeds</Speech> prompt\nASSISTANT:
        # embed_tokens-> ： nn.Embedding(65000, 4096)

        """
        处理labels, labels本本身是已经padding过的，shape:(B , T)
        首先对其经过sos_eos处理， 得到两个padded_labels_in和padded_labels_out,
        labels_in不加入bos 
        然后使用Embedding层对padded_labels_in进行编码
        接着得到bos_ids和bos_embeds, eos_ids和eos_embeds.shape: (B,1),  (B,1, 4096)
        """
        labels_ids = labels  # torch.Size([17, 13])
        labels_in, labels_out = add_sos_eos4speech_llm(labels_ids, self.llama_tokenizer.bos_token_id,
                                                       self.llama_tokenizer.eos_token_id, ignore_id=-100)
        labels_in_embeds = self.embed_tokens(labels_in)  # torch.Size([17, 13, 4096])
        bos_ids = torch.ones([B, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 1
                             device=speech_embeds.device) * self.llama_tokenizer.bos_token_id
        bos_embeds = self.embed_tokens(bos_ids)  # torch.Size([17, 1, 4096])

        eos_ids = torch.ones([B, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 2
                             device=speech_embeds.device) * self.llama_tokenizer.eos_token_id
        eos_embeds = self.embed_tokens(eos_ids)
        # 195/196 look like model-specific "user"/"assistant" special-token
        # ids — TODO confirm against the tokenizer's vocabulary.
        user_ids = torch.ones([B, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 1
                              device=speech_embeds.device) * 195
        user_embeds = self.embed_tokens(user_ids)
        assistant_ids = torch.ones([B, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 1
                                   device=speech_embeds.device) * 196
        assistant_embeds = self.embed_tokens(assistant_ids)
        """
        将左prompt 音频 右prompt label_in 的高纬特征拼接在一起。
        将左prompt 音频 右prompt label_out 的id拼接在一起作为ground truth
        """

        speech_embeds_B, speech_embeds_T = speech_embeds.size(0), speech_embeds.size(1)
        speech_ids = torch.ones([speech_embeds_B, speech_embeds_T], dtype=torch.long, device=speech_embeds.device)

        # Interleave: text piece 0, example audio 0, text piece 1, example
        # audio 1, ..., final text piece, then the target speech embeddings.
        prompt_left_list = prompt_left.split('<SampleHere>')
        res_embeds_list = [bos_embeds, user_embeds, ]
        assert len(prompt_left_list) == len(
            prompt_wav_path_list) + 1, f'len(prompt_left_list) != len(prompt_wav_path_list)+1, len(prompt_left_list): {len(prompt_left_list)}, len(prompt_wav_path_list)+1: {len(prompt_wav_path_list) + 1}'
        for i, prompt_i in enumerate(prompt_left_list):
            """"""
            utils_file.logging_limit_print(f'generate_ICL(): prompt_{i}:', prompt_i)
            prompt_i_embeds = self.get_embedding_from_text(prompt_i)
            prompt_i_embeds = prompt_i_embeds.to(speech_embeds.device)
            prompt_i_embeds = prompt_i_embeds.repeat_interleave(B, dim=0)
            res_embeds_list.append(prompt_i_embeds)
            if i != len(prompt_left_list) - 1:
                sample_i_embeds = self.get_embeds_from_wav_path(prompt_wav_path_list[i])
                sample_i_embeds = sample_i_embeds.to(speech_embeds.device)
                sample_i_embeds = sample_i_embeds.repeat_interleave(B, dim=0)
                res_embeds_list.append(sample_i_embeds)
        res_embeds_list.append(speech_embeds)
        res_embeds_list.append(prompt_right_embeds)
        res_embeds_list.append(assistant_embeds)
        res_embeds_list.append(labels_in_embeds)
        res_embeds_list.append(eos_embeds)
        # embeds = torch.cat([bos_embeds, prompt_left_embeds, speech_embeds, prompt_right_embeds], dim=1)
        embeds = torch.cat(
            res_embeds_list,
            dim=1)
        utils_file.logging_limit_print(f'embeds.shape:', embeds.shape)
        # The masked prefix spans everything except the labels_out tail, so
        # its length is total embeds length minus labels_out length.
        embeds_size_0, embeds_size_1 = embeds.size(0), embeds.size(1)
        labels_out_size_0, labels_out_size_1 = labels_out.size(0), labels_out.size(1)
        concat_ids_size_0 , concat_ids_size_1 = B, embeds_size_1-labels_out_size_1
        # concat_ids = torch.cat([bos_ids, user_ids, prompt_left_ids, speech_ids, prompt_right_ids, assistant_ids], dim=1)
        concat_ids = torch.ones(size=[concat_ids_size_0, concat_ids_size_1], dtype=torch.int32,device=embeds.device)
        filled_ids = concat_ids.fill_(-100)  # In CrossEntropyLoss(), ignore_index = -100
        utils_file.logging_limit_print(f'filled_ids.shape:', filled_ids.shape)
        utils_file.logging_limit_print(f'labels_out.shape:', labels_out.shape)
        # embeds = torch.cat(
        #     [bos_embeds, user_embeds, prompt_left_embeds, speech_embeds, prompt_right_embeds, assistant_embeds,
        #      labels_in_embeds, eos_embeds], dim=1)
        labels = torch.cat([filled_ids, labels_out], dim=1)
        utils_file.logging_limit_print(f'labels.shape:, label.dtype', labels.shape, labels.dtype)
        if self.low_resource:
            embeds = embeds.to(torch.int8).to(torch.float16)
        outputs = self.llama_model(
            inputs_embeds=embeds,
            labels=labels,
        )
        loss = outputs['loss']  # 0-dim tensor (plain scalar)
        return {"loss": loss}

    def forward(self,
                batch,
                device,
                ):
        """ASR training forward pass.

        Splices [bos, user, left prompt, speech embeddings, right prompt,
        assistant, target-text embeddings, eos] into one embedding sequence,
        masks every non-target position with -100, and returns the LM loss.

        Args:
            batch: dict with 'feats', 'feats_lengths', 'target',
                'target_lengths'.
            device: device the batch tensors are moved to.

        Returns:
            dict with a single 'loss' entry (scalar tensor).
        """
        utils_file.logging_limit_print('进行salmonn forward() ,首先来看一下输入')
        wavs = batch['feats'].to(device)
        utils_file.logging_limit_print('wavs.shape:', wavs.shape)
        wavs_len = batch['feats_lengths'].to(device)
        # utils_file.logging_limit_print('wavs_len:', wavs_len)
        utils_file.logging_limit_print('wavs_len.shape:', wavs_len.shape)
        labels = batch['target'].to(device)
        utils_file.logging_limit_print('labels.shape:', labels.shape)
        labels_len = batch['target_lengths'].to(device)
        utils_file.logging_limit_print('观看结束')
        prompt = "转录如下音频."
        """
        首先 得到音频编码的特征
        speech_embeds ： 为输入LLM的音频编码特征， 已经对齐特征维度。 shape:(b, t, 4096)
        """
        speech_embeds = self.get_embedding_from_wav(wavs, wavs_len)
        B = speech_embeds.size(0)

        """
        接着处理prompt， 将其首先使用分词器编码成数字序列shape(1,N), 接着使用LLM的Embedding层对其进行编码shape(1,N, 4096)
        embed_tokens： nn.Embedding(65000, 4096). 
        知识补充：在模型的输入中，可以选择是否在序列的开头和结尾添加一些特殊的token，如CLS、SEP等，
        以适应模型的要求。add_special_tokens=False 表示不添加特殊token
        """
        # prompt-> :  USER: <Speech>speech_embeds</Speech> prompt\nASSISTANT:
        # embed_tokens-> ： nn.Embedding(65000, 4096)
        prompt_left, prompts_right = self.prompt_pattern.format(prompt).split(
            '<SpeechHere>')
        # prompt_left: USER: <Speech>
        # prompts_right: </Speech> Describe the speech.\nASSISTANT:
        prompt_left_ids = self.llama_tokenizer(  # shape: [1, 7]
            prompt_left,
            return_tensors="pt",
            add_special_tokens=False
        ).to(speech_embeds.device).input_ids
        # tensor([[ 3148,  1001, 29901,   529, 10649,  5309, 29958]], device='cuda:0')
        prompt_left_embeds = self.embed_tokens(prompt_left_ids).repeat_interleave(B, dim=0)  # torch.Size([17, 7, 4096])
        prompt_left_ids = prompt_left_ids.repeat_interleave(B, dim=0)  # torch.Size([17, 7])

        prompt_right_ids = self.llama_tokenizer(
            prompts_right,
            return_tensors="pt",
            add_special_tokens=False
        ).to(speech_embeds.device).input_ids
        prompt_right_embeds = self.embed_tokens(prompt_right_ids).repeat_interleave(B, dim=0)  # torch.Size([17, 14, 4096])
        prompt_right_ids = prompt_right_ids.repeat_interleave(B, dim=0)  # torch.Size([17, 14])

        """
        处理labels, labels本本身是已经padding过的，shape:(B , T)
        首先对其经过sos_eos处理， 得到两个padded_labels_in和padded_labels_out,
        labels_in不加入bos 
        然后使用Embedding层对padded_labels_in进行编码
        接着得到bos_ids和bos_embeds, eos_ids和eos_embeds.shape: (B,1),  (B,1, 4096)
        """
        labels_ids = labels  # torch.Size([17, 13])
        labels_in, labels_out = add_sos_eos4speech_llm(labels_ids, self.llama_tokenizer.bos_token_id,
                                                       self.llama_tokenizer.eos_token_id, ignore_id=-100)
        labels_in_embeds = self.embed_tokens(labels_in)  # torch.Size([17, 13, 4096])
        bos_ids = torch.ones([B, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 1
                             device=speech_embeds.device) * self.llama_tokenizer.bos_token_id
        bos_embeds = self.embed_tokens(bos_ids)  # torch.Size([17, 1, 4096])

        eos_ids = torch.ones([B, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 2
                             device=speech_embeds.device) * self.llama_tokenizer.eos_token_id
        eos_embeds = self.embed_tokens(eos_ids)
        # 195/196 look like model-specific "user"/"assistant" special-token
        # ids — TODO confirm against the tokenizer's vocabulary.
        user_ids = torch.ones([B, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 1
                              device=speech_embeds.device) * 195
        user_embeds = self.embed_tokens(user_ids)
        assistant_ids = torch.ones([B, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 1
                                   device=speech_embeds.device) * 196
        assistant_embeds = self.embed_tokens(assistant_ids)

        """
        将左prompt 音频 右prompt label_in 的高纬特征拼接在一起。
        将左prompt 音频 右prompt label_out 的id拼接在一起作为ground truth
        """
        # filled_ids masks the prefix [bos,user,left,speech,right,assistant];
        # labels_out must then cover the labels_in + eos tail — presumably
        # add_sos_eos4speech_llm appends eos to labels_out; verify there.
        speech_embeds_B, speech_embeds_T = speech_embeds.size(0), speech_embeds.size(1)
        speech_ids = torch.ones([speech_embeds_B, speech_embeds_T], dtype=torch.long, device=speech_embeds.device)
        concat_ids = torch.cat([bos_ids, user_ids, prompt_left_ids, speech_ids, prompt_right_ids, assistant_ids], dim=1)
        filled_ids = concat_ids.fill_(-100)  # In CrossEntropyLoss(), ignore_index = -100
        labels = torch.cat([filled_ids, labels_out], dim=1)
        embeds = torch.cat(
            [bos_embeds, user_embeds, prompt_left_embeds, speech_embeds, prompt_right_embeds, assistant_embeds,
             labels_in_embeds, eos_embeds], dim=1)
        outputs = self.llama_model(
            inputs_embeds=embeds,
            labels=labels,
        )
        loss = outputs['loss']  # 0-dim tensor (plain scalar)
        return {"loss": loss}



    def generate(
            self,
            wavs,
            wavs_len,
            prompt,
    ):
        """Autoregressively decode text for a batch of audio.

        Builds [bos, user, left prompt, speech embeddings, right prompt,
        assistant] as input embeddings and calls ``llama_model.generate``
        with the hyper-parameters stored at construction time.

        Args:
            wavs: batched audio features accepted by the speech encoder.
            wavs_len: per-utterance lengths.
            prompt: task prompt text spliced into ``self.prompt_pattern``.

        Returns:
            list[str]: decoded hypotheses with special tokens skipped.
        """
        # logging.info(prompt_id)
        # prompt = self.prompt[prompt_id]
        if self.gxl_num < 10:
            logging.info("prompt: {}".format(prompt))
            self.gxl_num += 1
        # import pdb;pdb.set_trace()
        # Same pipeline as get_embedding_from_wav(), inlined here.
        speech_embeds, speech_lens = self.speech_encoder(wavs, wavs_len)
        speech_embeds = self.hubert_dim2whisper_dim(speech_embeds)
        speech_embeds = self.down_sample_2(speech_embeds)
        B, T, C = speech_embeds.shape
        speech_embeds, speech_masks = self.speech_transformer(speech_embeds, speech_lens)
        speech_embeds = self.speech_llama_proj(speech_embeds)

        # USER: <Speech>speech_embeds<Speech> prompt\nASSISTANT:
        embed_tokens = self.llama_model.model.model.embed_tokens if self.lora else self.llama_model.model.embed_tokens
        prompt_left, prompts_right = self.prompt_pattern.format(prompt).split(
            '<SpeechHere>')  # prompt_left: 'USER: <Speech>', prompt_right: '</Speech> Describe the speech.\nASSISTANT:'
        prompt_left_ids = self.llama_tokenizer(
            prompt_left,
            return_tensors="pt",
            add_special_tokens=False
        ).to(
            speech_embeds.device).input_ids  # tensor([[ 3148,  1001, 29901,   529, 10649,  5309, 29958]], device='cuda:0')
        prompt_left_embeds = embed_tokens(prompt_left_ids)  # torch.Size([1, 7, 4096])
        prompt_right_ids = self.llama_tokenizer(
            prompts_right,
            return_tensors="pt",
            add_special_tokens=False
        ).to(
            speech_embeds.device).input_ids  # tensor([[ 1533, 10649,  5309, 29958, 20355,   915,   278, 12032, 29889,    13, 22933,  9047, 13566, 29901]], device='cuda:0')
        prompt_right_embeds = embed_tokens(prompt_right_ids)  # torch.Size([1, 14, 4096])

        bos_embeds = self.llama_model.model.embed_tokens(
            torch.ones(
                [B, 1],
                dtype=torch.long,
                device=speech_embeds.device,
            ) * self.llama_tokenizer.bos_token_id
        ) if not self.lora else self.llama_model.model.model.embed_tokens(
            torch.ones(
                [B, 1],
                dtype=torch.long,
                device=speech_embeds.device,
            ) * self.llama_tokenizer.bos_token_id
        )  # torch.Size([1, 14, 4096])
        # NOTE(review): user/assistant/prompt embeds are built with batch 1
        # while bos_embeds uses B — the cat below only lines up for B == 1;
        # confirm this method is used one utterance at a time.
        user_embeds = embed_tokens(
            torch.ones(
                [1, 1],
                dtype=torch.long,
                device=speech_embeds.device,
            ) * 195
        )
        assistant_embeds = embed_tokens(
            torch.ones(
                [1, 1],
                dtype=torch.long,
                device=speech_embeds.device,
            ) * 196
        )
        # embeds = torch.cat([bos_embeds, prompt_left_embeds, speech_embeds, prompt_right_embeds], dim=1)
        embeds = torch.cat(
            [bos_embeds, user_embeds, prompt_left_embeds, speech_embeds, prompt_right_embeds, assistant_embeds],
            dim=1)
        atts = torch.ones(embeds.size()[:-1], dtype=torch.long).to(embeds.device)
        # import pdb;pdb.set_trace()
        # generate
        # peft/peft_model.py(726)generate()
        # embeds = embeds.to(torch.float16)
        # Match the LLM's fp16 weights to avoid dtype mismatches in generate.
        if self.embed_tokens.weight.dtype == torch.float16:
            utils_file.logging_limit_print('generate(): self.embed_tokens.weight.dtype == torch.float16')
            embeds = embeds.to(torch.float16)
            atts = atts.half()
        outputs = self.llama_model.generate(
            inputs_embeds=embeds,
            # max_length=self.max_length,
            max_new_tokens=self.max_length,
            num_beams=self.num_beams,
            do_sample=self.do_sample,
            min_length=self.min_length,
            top_p=self.top_p,
            repetition_penalty=self.repetition_penalty,
            length_penalty=self.length_penalty,
            temperature=self.temperature,
            attention_mask=atts,
            bos_token_id=self.llama_tokenizer.bos_token_id,
            eos_token_id=self.llama_tokenizer.eos_token_id,
            pad_token_id=self.llama_tokenizer.pad_token_id,
        )

        output_text = self.llama_tokenizer.batch_decode(outputs, add_special_tokens=False, skip_special_tokens=True)

        return output_text

    def get_embedding_from_wav(self, wavs, wavs_len):
        """Run the full speech-to-LLM embedding pipeline.

        speech_encoder -> hubert_dim2whisper_dim (identity for whisper)
        -> down_sample_2 (identity unless downsample_rate was set)
        -> speech_transformer adapter -> speech_llama_proj.

        Args:
            wavs: batched speech input accepted by ``self.speech_encoder``.
            wavs_len: per-utterance lengths, shape (B,).

        Returns:
            Tensor (B, T', llm_hidden_size) of speech embeddings.
        """
        utils_file.logging_limit_print('get_embedding_from_wav(): wavs.shape:', wavs.shape)
        # utils_file.logging_limit_print('get_embedding_from_wav(): wavs_len:', wavs_len)
        # (removed an accidentally duplicated copy of the next log line)
        utils_file.logging_limit_print('get_embedding_from_wav(): wavs_len.shape:', wavs_len.shape)
        speech_embeds, speech_lens = self.speech_encoder(wavs, wavs_len)
        utils_file.logging_limit_print(
            'get_embedding_from_wav(): speech_embeds.shape,by  self.speech_encoder(wavs, wavs_len):',
            speech_embeds.shape)
        speech_embeds = self.hubert_dim2whisper_dim(speech_embeds)
        utils_file.logging_limit_print(
            'get_embedding_from_wav(): speech_embeds.shape,by  self.hubert_dim2whisper_dim(speech_embeds):',
            speech_embeds.shape)
        speech_embeds = self.down_sample_2(speech_embeds)
        utils_file.logging_limit_print(
            'get_embedding_from_wav(): speech_embeds.shape,by  self.down_sample_2(speech_embeds):', speech_embeds.shape)
        # NOTE(review): speech_lens is NOT rescaled after down_sample_2, so
        # when downsample_rate > 1 the lengths passed to the adapter exceed
        # the new T — confirm speech_transformer tolerates that.
        speech_embeds, speech_masks = self.speech_transformer(speech_embeds, speech_lens)
        utils_file.logging_limit_print(
            'get_embedding_from_wav(): speech_embeds.shape,by  self.speech_transformer(speech_embeds, speech_lens):',
            speech_embeds.shape)
        speech_embeds = self.speech_llama_proj(speech_embeds)
        utils_file.logging_limit_print(
            'get_embedding_from_wav(): speech_embeds.shape,by  self.speech_llama_proj(speech_embeds):',
            speech_embeds.shape)
        return speech_embeds

    def get_embedding_from_text(self, text):
        text_id = self.llama_tokenizer(
            text,
            return_tensors="pt",
            add_special_tokens=False
        ).to(
            self.embed_tokens.weight.device).input_ids
        text_embeds = self.embed_tokens(text_id)
        return text_embeds

    def get_embeds_from_wav_path(self, wav_path):
        """Load one audio file from disk and return its LLM-space speech
        embeddings of shape (1, T', hidden)."""
        utils_file.logging_limit_print('get_embeds_from_wav_path(): wav_i2_path:', wav_path)
        audio, _ = torchaudio.load(wav_path)
        utils_file.logging_limit_print('get_embeds_from_wav_path(): waveform_i2.shape:', audio.shape)
        if len(audio.shape) != 1:
            # Multi-channel input: keep only the first channel.
            audio = audio[0]
        target_device = self.embed_tokens.weight.device
        audio = audio.to(target_device)
        audio_len = torch.tensor([len(audio)], device=target_device, dtype=torch.int32)
        embeds = self.get_embedding_from_wav(audio.unsqueeze(0), audio_len)
        utils_file.logging_limit_print('get_embeds_from_wav_path(): sample_i2_embeds.shape:', embeds.shape)
        return embeds


    def generate_ICL(
            self,
            wavs,
            wavs_len,
            prompt,
            prompt_wav_path_list):
        """In-context-learning generation with exactly two audio exemplars.

        The formatted prompt (``self.prompt_pattern.format(prompt)``) must
        contain one '<SpeechHere>' placeholder for the target speech and, in
        the text left of it, exactly two '<SampleHere>' placeholders where the
        exemplar wav embeddings are spliced in.

        Args:
            wavs: batched waveform tensor for the target utterance(s).
            wavs_len: per-utterance sample counts for ``wavs``.
            prompt: task prompt inserted into ``self.prompt_pattern``.
            prompt_wav_path_list: paths of the two exemplar wav files.

        Returns:
            list[str]: decoded LLM outputs, one per batch element.
        """
        utils_file.logging_limit_print('generate_ICL(): prompt:', prompt)
        utils_file.logging_limit_print('generate_ICL(): prompt_wav_path_list:', prompt_wav_path_list)
        utils_file.logging_limit_print('generate_ICL(): wavs.shape:', wavs.shape)
        utils_file.logging_limit_print('generate_ICL(): wavs_len.shape:', wavs_len.shape)
        utils_file.logging_limit_print('generate_ICL(): wavs_len:', wavs_len)

        speech_embeds = self.get_embedding_from_wav(wavs, wavs_len)
        B, T, C = speech_embeds.shape
        utils_file.logging_limit_print('generate_ICL(): speech_embeds.shape:', speech_embeds.shape)

        # Reuse the shared helper instead of duplicating the
        # load -> mono -> device -> embed sequence inline for each exemplar.
        # NOTE(review): the helper places tensors on
        # self.embed_tokens.weight.device rather than speech_embeds.device;
        # these are assumed to be the same device — confirm for multi-device
        # setups.
        sample_i1_embeds = self.get_embeds_from_wav_path(prompt_wav_path_list[0])
        utils_file.logging_limit_print('generate_ICL(): sample_i1_embeds.shape:', sample_i1_embeds.shape)
        sample_i2_embeds = self.get_embeds_from_wav_path(prompt_wav_path_list[1])
        utils_file.logging_limit_print('generate_ICL(): sample_i2_embeds.shape:', sample_i2_embeds.shape)

        # e.g. prompt_left: 'USER: <Speech>',
        #      prompts_right: '</Speech> Describe the speech.\nASSISTANT:'
        prompt_left, prompts_right = self.prompt_pattern.format(prompt).split(
            '<SpeechHere>')

        # Two '<SampleHere>' markers split the left part into three segments.
        prompt_left_1, prompt_left_2, prompt_left_3 = prompt_left.split('<SampleHere>')
        utils_file.logging_limit_print('generate_ICL(): prompt_left_1:', prompt_left_1)
        utils_file.logging_limit_print('generate_ICL(): prompt_left_2:', prompt_left_2)
        utils_file.logging_limit_print('generate_ICL(): prompt_left_3:', prompt_left_3)

        prompt_left_embeds_1 = self.get_embedding_from_text(prompt_left_1)
        prompt_left_embeds_2 = self.get_embedding_from_text(prompt_left_2)
        prompt_left_embeds_3 = self.get_embedding_from_text(prompt_left_3)
        prompt_right_embeds = self.get_embedding_from_text(prompts_right)

        # 195/196 are assumed to be the tokenizer's user/assistant role token
        # ids (they match the user_token_id/assistant_token_id noted in the
        # GenerationConfig comment elsewhere in this file) — confirm against
        # the tokenizer config.
        bos_embeds = self.embed_tokens(
            torch.ones(
                [B, 1],
                dtype=torch.long,
                device=speech_embeds.device,
            ) * self.llama_tokenizer.bos_token_id)
        user_embeds = self.embed_tokens(
            torch.ones(
                [1, 1],
                dtype=torch.long,
                device=speech_embeds.device,
            ) * 195
        )
        assistant_embeds = self.embed_tokens(
            torch.ones(
                [1, 1],
                dtype=torch.long,
                device=speech_embeds.device,
            ) * 196
        )
        # NOTE(review): user/assistant/sample embeds have batch size 1 while
        # bos_embeds uses B, so this concatenation only works for B == 1 —
        # confirm callers never pass a larger batch.
        embeds = torch.cat(
            [bos_embeds, user_embeds, prompt_left_embeds_1, sample_i1_embeds,
             prompt_left_embeds_2, sample_i2_embeds, prompt_left_embeds_3,
             speech_embeds, prompt_right_embeds, assistant_embeds],
            dim=1)
        atts = torch.ones(embeds.size()[:-1], dtype=torch.long).to(embeds.device)
        # Half-precision models require half-precision inputs_embeds.
        if self.embed_tokens.weight.dtype == torch.float16:
            utils_file.logging_limit_print('generate_ICL(): self.embed_tokens.weight.dtype == torch.float16')
            embeds = embeds.to(torch.float16)
            atts = atts.half()
        outputs = self.llama_model.generate(
            inputs_embeds=embeds,
            # max_length=self.max_length,
            max_new_tokens=self.max_length,
            num_beams=self.num_beams,
            do_sample=self.do_sample,
            min_length=self.min_length,
            top_p=self.top_p,
            repetition_penalty=self.repetition_penalty,
            length_penalty=self.length_penalty,
            temperature=self.temperature,
            attention_mask=atts,
            bos_token_id=self.llama_tokenizer.bos_token_id,
            eos_token_id=self.llama_tokenizer.eos_token_id,
            pad_token_id=self.llama_tokenizer.pad_token_id,
        )

        output_text = self.llama_tokenizer.batch_decode(outputs, add_special_tokens=False, skip_special_tokens=True)

        return output_text

    def generate_ICL_with_target(
        self,
        wavs,
        wavs_len,
        prompt,
        prompt_wav_path_list=None,
        prompt_wav_label_list=None,
    ):
        """In-context-learning generation where each exemplar wav is paired
        with its transcript label.

        The exemplar list is rendered into the prompt via a '<SAMPLE>'
        placeholder; each rendered exemplar keeps a '<SampleHere>' marker so
        the corresponding audio embeddings can be spliced in after splitting
        the prompt text. ``self.prompt_pattern`` must additionally contain a
        '<SpeechHere>' placeholder for the target speech.

        Args:
            wavs: batched waveform tensor for the target utterance(s).
            wavs_len: per-utterance sample counts for ``wavs``.
            prompt: task prompt containing a '<SAMPLE>' placeholder.
            prompt_wav_path_list: exemplar wav paths; sampled from
                ``global_sampler`` when not given.
            prompt_wav_label_list: transcripts matching
                ``prompt_wav_path_list``; sampled together with the paths
                when not given.

        Returns:
            list[str]: decoded LLM outputs, one per batch element.
        """
        utils_file.logging_limit_print("开始得到示例list")
        # No exemplars supplied: draw 10 random (wav, label) pairs.
        if prompt_wav_path_list is None or prompt_wav_label_list is None:
            prompt_wav_path_list, prompt_wav_label_list = global_sampler.get_num_pair(10)
        utils_file.logging_limit_print("结束得到示例list")

        utils_file.logging_limit_print('generate_ICL(): prompt:', prompt)
        utils_file.logging_limit_print('generate_ICL(): prompt_wav_path_list:', prompt_wav_path_list)
        utils_file.logging_limit_print('generate_ICL(): prompt_wav_label_list:', prompt_wav_label_list)
        utils_file.logging_limit_print('generate_ICL(): wavs.shape:', wavs.shape)
        utils_file.logging_limit_print('generate_ICL(): wavs_len.shape:', wavs_len.shape)
        utils_file.logging_limit_print('generate_ICL(): wavs_len:', wavs_len)

        assert len(prompt_wav_path_list) == len(prompt_wav_label_list), f'len(prompt_wav_path_list) != len(prompt_wav_label_list), len(prompt_wav_path_list): {len(prompt_wav_path_list)}, len(prompt_wav_label_list): {len(prompt_wav_label_list)}'
        # Render one text fragment per exemplar.  The '<SampleHere>' marker is
        # deliberately left in place — it is used below to split the prompt so
        # audio embeddings can be inserted at those positions.
        res_sample_prompt = ""
        one_sample_prompt_pattern = "示例{}：<SampleHere>,对应转录文本为：<LabelHere>;"
        # one_sample_prompt_pattern = "示例{}对应转录文本为：<LabelHere>;"
        for i, items in enumerate(zip(prompt_wav_path_list, prompt_wav_label_list)):
            wav_path, wav_label = items
            utils_file.logging_limit_print('generate_ICL(): wav_path:', wav_path)
            utils_file.logging_limit_print('generate_ICL(): wav_label:', wav_label)
            # one_sample_prompt = one_sample_prompt.replace('<SampleHere>', wav_path)
            one_sample_prompt = one_sample_prompt_pattern.replace('<LabelHere>', wav_label)
            # NOTE(review): examples are numbered from 0 because str(i) is
            # used — confirm whether 1-based numbering (str(i + 1)) was
            # intended.
            one_sample_prompt = one_sample_prompt.replace('{}', str(i))
            res_sample_prompt += one_sample_prompt
        utils_file.logging_limit_print('generate_ICL(): res_sample_prompt:', res_sample_prompt)
        prompt = prompt.replace('<SAMPLE>', res_sample_prompt)
        utils_file.logging_limit_print('generate_ICL(): prompt:', prompt)

        speech_embeds = self.get_embedding_from_wav(wavs, wavs_len)
        B, T, C = speech_embeds.shape
        utils_file.logging_limit_print('generate_ICL(): speech_embeds.shape:', speech_embeds.shape)

        prompt_left, prompts_right = self.prompt_pattern.format(prompt).split(
            '<SpeechHere>')  # prompt_left: 'USER: <Speech>', prompt_right: '</Speech> Describe the speech.\nASSISTANT:'

        # N exemplars -> N '<SampleHere>' markers -> N+1 text segments.
        prompt_left_list = prompt_left.split('<SampleHere>')
        utils_file.logging_limit_print('generate_ICL(): prompt_left_list:', prompt_left_list)
        utils_file.logging_limit_print('generate_ICL(): len(prompt_left_list):', len(prompt_left_list))

        prompt_right_embeds = self.get_embedding_from_text(prompts_right)
        # 195/196 are assumed to be the tokenizer's user/assistant role token
        # ids (they match the GenerationConfig comment elsewhere in this
        # file) — confirm against the tokenizer config.
        bos_embeds = self.embed_tokens(
            torch.ones(
                [B, 1],
                dtype=torch.long,
                device=speech_embeds.device,
            ) * self.llama_tokenizer.bos_token_id)
        user_embeds = self.embed_tokens(
            torch.ones(
                [1, 1],
                dtype=torch.long,
                device=speech_embeds.device,
            ) * 195
        )
        assistant_embeds = self.embed_tokens(
            torch.ones(
                [1, 1],
                dtype=torch.long,
                device=speech_embeds.device,
            ) * 196
        )

        # Interleave text-segment embeddings with exemplar audio embeddings:
        # segment_0, wav_0, segment_1, wav_1, ..., segment_N (no wav after the
        # last segment), then target speech, right prompt, assistant token.
        res_embeds_list = [bos_embeds, user_embeds,]
        assert len(prompt_left_list) == len(prompt_wav_path_list)+1, f'len(prompt_left_list) != len(prompt_wav_path_list)+1, len(prompt_left_list): {len(prompt_left_list)}, len(prompt_wav_path_list)+1: {len(prompt_wav_path_list)+1}'
        for i, prompt_i in enumerate(prompt_left_list):
            """"""
            utils_file.logging_limit_print(f'generate_ICL(): prompt_{i}:', prompt_i)
            prompt_i_embeds = self.get_embedding_from_text(prompt_i)
            res_embeds_list.append(prompt_i_embeds)
            if i != len(prompt_left_list) - 1:
                sample_i_embeds = self.get_embeds_from_wav_path(prompt_wav_path_list[i])
                res_embeds_list.append(sample_i_embeds)
        res_embeds_list.append(speech_embeds)
        res_embeds_list.append(prompt_right_embeds)
        res_embeds_list.append(assistant_embeds)
        # embeds = torch.cat([bos_embeds, prompt_left_embeds, speech_embeds, prompt_right_embeds], dim=1)
        embeds = torch.cat(
            res_embeds_list,
            dim=1)
        atts = torch.ones(embeds.size()[:-1], dtype=torch.long).to(embeds.device)
        # Half-precision models require half-precision inputs_embeds.
        if self.embed_tokens.weight.dtype == torch.float16:
            utils_file.logging_limit_print('generate_ICL(): self.embed_tokens.weight.dtype == torch.float16')
            embeds = embeds.to(torch.float16)
            atts = atts.half()
        outputs = self.llama_model.generate(
            inputs_embeds=embeds,
            # max_length=self.max_length,
            max_new_tokens=self.max_length,
            num_beams=self.num_beams,
            do_sample=self.do_sample,
            min_length=self.min_length,
            top_p=self.top_p,
            repetition_penalty=self.repetition_penalty,
            length_penalty=self.length_penalty,
            temperature=self.temperature,
            attention_mask=atts,
            bos_token_id=self.llama_tokenizer.bos_token_id,
            eos_token_id=self.llama_tokenizer.eos_token_id,
            pad_token_id=self.llama_tokenizer.pad_token_id,
        )

        output_text = self.llama_tokenizer.batch_decode(outputs, add_special_tokens=False, skip_special_tokens=True)

        return output_text

    def generate4prompt(self, prompt):
        """Generate an LLM response from a plain text prompt (no audio input).

        Args:
            prompt: the text prompt; tokenized without special tokens and
                prefixed with a single BOS embedding.

        Returns:
            list[str]: decoded generation(s), one per batch element.
        """
        device = self.llama_model.device
        prompt_ids = self.llama_tokenizer(
            prompt,
            return_tensors="pt",
            add_special_tokens=False
        ).input_ids.to(device)
        # With LoRA the base model is wrapped one level deeper
        # (llama_model.model.model instead of llama_model.model).
        embed_tokens = self.llama_model.model.model.embed_tokens if self.lora else self.llama_model.model.embed_tokens
        prompt_embeds = embed_tokens(prompt_ids)  # torch.Size([1, 14, 4096])
        # Reuse the embedding table resolved above instead of repeating the
        # lora/non-lora branch a second time (the previous code duplicated the
        # conditional verbatim just for the BOS embedding).
        bos_embeds = embed_tokens(
            torch.ones(
                [1, 1],
                dtype=torch.long,
                device=device,
            ) * self.llama_tokenizer.bos_token_id
        )
        # Reference GenerationConfig of the underlying model:
        # GenerationConfig {
        #   "assistant_token_id": 196,
        #   "bos_token_id": 1,
        #   "do_sample": true,
        #   "eos_token_id": 2,
        #   "max_new_tokens": 2048,
        #   "pad_token_id": 0,
        #   "repetition_penalty": 1.05,
        #   "temperature": 0.3,
        #   "top_k": 5,
        #   "top_p": 0.85,
        #   "user_token_id": 195
        # }
        embeds = torch.cat([bos_embeds, prompt_embeds], dim=1)
        atts = torch.ones(embeds.size()[:-1], dtype=torch.long).to(embeds.device)
        # NOTE(review): this path passes max_length while the ICL paths pass
        # max_new_tokens — kept as-is to preserve behavior; confirm which is
        # intended.
        outputs = self.llama_model.generate(
            inputs_embeds=embeds,
            max_length=self.max_length,
            num_beams=self.num_beams,
            do_sample=self.do_sample,
            min_length=self.min_length,
            top_p=self.top_p,
            repetition_penalty=self.repetition_penalty,
            length_penalty=self.length_penalty,
            temperature=self.temperature,
            attention_mask=atts,
            bos_token_id=self.llama_tokenizer.bos_token_id,
            eos_token_id=self.llama_tokenizer.eos_token_id,
            pad_token_id=self.llama_tokenizer.pad_token_id,
        )
        output_text = self.llama_tokenizer.batch_decode(outputs, add_special_tokens=False, skip_special_tokens=True)
        return output_text
