import logging
import os
import random
import sys

import torchaudio

sys.path.append('../../')
import torch
from peft import LoraConfig, TaskType, get_peft_model
from torch import nn
from transformers import AutoModelForCausalLM, AutoTokenizer

from wenet.transformer.encoder import TransformerEncoder
from wenet.utils.common import add_sos_eos, add_sos_eos4speech_llm, th_accuracy, top_k_accuracy
# from wenet.utils.gxl_utils import Whisper_Utils
from gxl_ai_utils.utils import utils_file
from transformers import LlamaConfig, LlamaModel
from wenet.nano_gpt import ar_llama


class GxlConv1dSubsampling2(nn.Module):
    """Conv1d time-subsampling module (~2x).

    Two Conv1d layers (kernel 3, strides 1 then 2), each followed by GELU.

    Args:
        idim (int): input feature dimension.
        odim (int): output feature dimension.
    """

    def __init__(self, idim: int, odim: int):
        """Build the two-layer Conv1d + GELU stack."""
        super().__init__()
        stack = []
        for in_ch, stride in ((idim, 1), (odim, 2)):
            stack.append(torch.nn.Conv1d(in_ch, odim, 3, stride))
            stack.append(torch.nn.GELU())
        self.conv = torch.nn.Sequential(*stack)

    def forward(self, x):
        """Subsample along the time axis.

        Args:
            x: tensor of shape (B, T, idim).

        Returns:
            Tensor of shape (B, T', odim) with T' ~= T // 2.
        """
        # Conv1d wants channels-first, so transpose around the stack.
        y = x.transpose(1, 2)
        y = self.conv(y)
        return y.transpose(1, 2)


class GxlConv1dSubsampling4(nn.Module):
    """Conv1d time-subsampling module (~4x).

    Three Conv1d layers (kernel 3, strides 1, 2, 2), each followed by GELU.

    Args:
        idim (int): input feature dimension.
        odim (int): output feature dimension.
    """

    def __init__(self, idim: int, odim: int):
        """Build the three-layer Conv1d + GELU stack."""
        super().__init__()
        stack = []
        for in_ch, stride in ((idim, 1), (odim, 2), (odim, 2)):
            stack.append(torch.nn.Conv1d(in_ch, odim, 3, stride))
            stack.append(torch.nn.GELU())
        self.conv = torch.nn.Sequential(*stack)

    def forward(self, x):
        """Subsample along the time axis.

        Args:
            x: tensor of shape (B, T, idim).

        Returns:
            Tensor of shape (B, T', odim) with T' ~= T // 4.
        """
        # Conv1d wants channels-first, so transpose around the stack.
        y = x.transpose(1, 2)
        y = self.conv(y)
        return y.transpose(1, 2)


class GxlConv1dSubsampling6(nn.Module):
    """Conv1d time-subsampling module (~6x).

    Three Conv1d layers (kernel 3, strides 1, 2, 3), each followed by GELU.

    Args:
        idim (int): input feature dimension.
        odim (int): output feature dimension.
    """

    def __init__(self, idim: int, odim: int):
        """Build the three-layer Conv1d + GELU stack."""
        super().__init__()
        stack = []
        for in_ch, stride in ((idim, 1), (odim, 2), (odim, 3)):
            stack.append(torch.nn.Conv1d(in_ch, odim, 3, stride))
            stack.append(torch.nn.GELU())
        self.conv = torch.nn.Sequential(*stack)

    def forward(self, x):
        """Subsample along the time axis.

        Args:
            x: tensor of shape (B, T, idim).

        Returns:
            Tensor of shape (B, T', odim) with T' ~= T // 6.
        """
        # Conv1d wants channels-first, so transpose around the stack.
        y = x.transpose(1, 2)
        y = self.conv(y)
        return y.transpose(1, 2)


class GxlConv1dSubsampling8(nn.Module):
    """Conv1d time-subsampling module (~8x).

    Four Conv1d layers (kernel 3, strides 1, 2, 2, 2), each followed by GELU,
    for a total time reduction of roughly 8x.

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
    """

    def __init__(self, idim: int, odim: int):
        """Construct an Conv1dSubsampling object."""
        super().__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv1d(idim, odim, 3, 1),
            torch.nn.GELU(),
            torch.nn.Conv1d(odim, odim, 3, 2),
            torch.nn.GELU(),
            torch.nn.Conv1d(odim, odim, 3, 2),
            torch.nn.GELU(),
            # BUG FIX: the last stride was 8, giving ~32x total subsampling.
            # The class name and the sibling classes (2x: strides 1,2;
            # 4x: 1,2,2; 6x: 1,2,3) imply the last stride must be 2 so the
            # product of strides is 8.  Stride is not a learned parameter,
            # so existing checkpoints still load unchanged.
            torch.nn.Conv1d(odim, odim, 3, 2),
            torch.nn.GELU(),
        )

    def forward(self, x):
        """Subsample along the time axis.

        Args:
            x: (B, T, idim)

        Returns:
            (B, T', odim) with T' ~= T // 8.
        """
        # Conv1d expects channels-first; transpose in and back out.
        x = x.transpose(1, 2)
        x = self.conv(x)
        x = x.transpose(1, 2)
        return x


def gxl_cat_case_by_case(input_tensors_list, control_list):
    """Concatenate selected tensors along dim 0.

    Picks the tensors of ``input_tensors_list`` at the indices listed in
    ``control_list`` (in that order) and concatenates them along dim 0.
    """
    chosen = [input_tensors_list[idx] for idx in control_list]
    return torch.cat(chosen, dim=0)


class Salmonn_Model(nn.Module):
    """Text-to-acoustic-token model.

    Pipeline: frozen text-embedding lookup -> MLP projector (4096 -> 1024)
    -> 5-block Transformer refiner -> small causal acoustic LM
    (``ar_llama``) that predicts acoustic tokens autoregressively.

    NOTE(review): many constructor arguments (``encoder``, ``llm_path``,
    the qformer/LoRA/prompt and ckpt-loading options) are accepted but
    never used in this class body — presumably kept for interface
    compatibility with sibling model classes; confirm before removing.
    """

    def __init__(self, encoder, llm_path, speech_qformer_token_num=1, speech_qformer_layer=2, lora=True, lora_alpha=32,
                 lora_rank=8, lora_dropout=0.1, second_per_frame=0.333333, second_stride=0.333333, low_resource=False,
                 # prompt=None,  # no new prompt value is ever passed in
                 prompt_pattern="{}：<Speech><SpeechHere></Speech>",
                 # "USER: <Speech><SpeechHere></Speech> {}\nASSISTANT:"
                 llama_model_generate_max_length=200, llama_model_generate_min_length=1,
                 llama_model_generate_num_beams=4, llama_model_generate_do_sample=True, llama_model_generate_top_p=0.9,
                 llama_model_generate_repetition_penalty=1.0, llama_model_generate_length_penalty=1.0,
                 llama_model_generate_temperature=1.0, load_epoch_ckpt=False, load_step_ckpt=False,
                 load_eval_ckpt=False, ckpt_path="", is_inference=False, downsample_rate=1, *args, **kwargs):
        """Build the Transformer refiner, projector, acoustic LM and the
        frozen text-embedding table.

        NOTE(review): the ``llama_model_generate_*`` values are stored on
        ``self`` but generation below hard-codes ``max_length=2024`` and
        does not read them — confirm whether that is intentional.
        """
        super().__init__()

        # 5-block Transformer that refines the projected 1024-dim text
        # embeddings before they are fed to the acoustic LM.
        self.speech_transformer_2 = TransformerEncoder(
            input_size=1024,
            output_size=1024,
            attention_heads=4,
            linear_units=2560,
            num_blocks=5,
            dropout_rate=0.1,
            positional_dropout_rate=0.1,
            attention_dropout_rate=0.0,
            input_layer="linear",
            pos_enc_layer_type="abs_pos",
            normalize_before=True
        )
        utils_file.do_print_param_num_all(self.speech_transformer_2, "speech_transformer_2")

        # Stored generation hyper-parameters (currently unread — see note above).
        self.max_length = llama_model_generate_max_length
        self.min_length = llama_model_generate_min_length
        self.num_beams = llama_model_generate_num_beams
        self.do_sample = llama_model_generate_do_sample
        self.top_p = llama_model_generate_top_p
        self.repetition_penalty = llama_model_generate_repetition_penalty
        self.length_penalty = llama_model_generate_length_penalty
        self.temperature = llama_model_generate_temperature
        self.load_epoch_ckpt = load_epoch_ckpt
        self.load_step_ckpt = load_step_ckpt
        self.load_eval_ckpt = load_eval_ckpt

        # MLP projecting 4096-dim text embeddings down to the acoustic LM's
        # 1024-dim input space.
        self.llama_nano_proj = nn.Sequential(
            nn.Linear(4096, 2048),
            nn.ReLU(),
            nn.Linear(2048, 1024),
            nn.ReLU(),
            nn.Linear(1024, 1024),
        )
        utils_file.logging_print("self.llama_model.config.hidden_size = ", 4096)
        utils_file.do_print_param_num_all(self.llama_nano_proj, "llama_nano_proj")
        # Small causal LM that autoregressively predicts acoustic tokens.
        self.acoustic_LLM = ar_llama.get_LLM_decoder()
        utils_file.do_print_param_num_all(self.acoustic_LLM, "acoustic_LLM")

        # Frozen text-embedding table loaded from a hard-coded checkpoint
        # path.  NOTE(review): this path is machine-specific; consider
        # making it a constructor argument.
        self.text2embedding = torch.nn.Embedding.from_pretrained(
            torch.load(
                "/home/work_nfs8/xlgeng/new_workspace/wenet_gxl_salmonn_TTS/examples/aishell/wenetspeech4tts_handler/data_list/gxl_embedding.pt"))
        utils_file.do_freeze_all_params(self.text2embedding, "text2embedding")

    def forward(self,
                batch,
                device,
                ):
        """Teacher-forced training step.

        Args:
            batch: dict with 'target' (text token ids), 'target_lengths',
                'npy' (acoustic token ids) and 'npy_lengths'.
            device: device the batch tensors are moved to.

        Returns:
            dict with 'loss' (0-dim tensor) and teacher-forced top-k
            accuracies for k in {50, 30, 20, 10, 5, 2, 1}.
        """
        utils_file.logging_limit_print('先来看看输入内容-----------------------------start')
        # wavs = batch['feats'].to(device)
        # utils_file.logging_limit_print(f'耿雪龙:wavs shape：{wavs.shape}')
        # wavs_len = batch['feats_lengths'].to(device)
        # utils_file.logging_limit_print(f'耿雪龙:wavs_len shape：{wavs_len.shape}')
        # utils_file.logging_limit_print(f'耿雪龙:wavs_len {wavs_len}')
        labels = batch['target'].to(device)
        utils_file.logging_limit_print(f'耿雪龙:labels shape：{labels.shape}')
        labels_len = batch['target_lengths'].to(device)
        utils_file.logging_limit_print(f'耿雪龙:labels_len shape：{labels_len.shape}')
        utils_file.logging_limit_print(f'耿雪龙:labels_len {labels_len}')
        acoustic_token = (batch['npy']).to(device)
        utils_file.logging_limit_print(f'耿雪龙:acoustic_token shape：{acoustic_token.shape}')
        acoustic_token_len = (batch['npy_lengths']).to(device)
        utils_file.logging_limit_print(f'耿雪龙:acoustic_token_len shape：{acoustic_token_len.shape}')
        utils_file.logging_limit_print(f'耿雪龙:acoustic_token_len {acoustic_token_len}')
        utils_file.logging_limit_print('batch内容观看结束-------------------------end')

        # Embed the (padded) text ids with the frozen table, project to
        # 1024-dim, then refine with the Transformer encoder.
        labels_padded = utils_file.do_padding_ids_by_lens(labels, labels_len, 0)
        text_embeds = self.text2embedding(labels_padded)
        utils_file.logging_limit_print(f'取文本对应的那批embeds的形状：{text_embeds.shape}')
        text_embeds_padded = utils_file.do_padding_embeds_by_lens(text_embeds, labels_len, 0)
        text_embeds = self.llama_nano_proj(text_embeds_padded)
        utils_file.logging_limit_print(f'text_embeds_feature after llama_nano_proj shape:{text_embeds.shape}')
        text_embeds, text_masks = self.speech_transformer_2(text_embeds, labels_len)
        utils_file.logging_limit_print(f'text_embeds_feature after speech_transformer_2 shape:{text_embeds.shape}')
        B = text_embeds.size(0)
        # Special-token embeddings taken from the acoustic LM's own input
        # embedding table (wte).
        embed_tokens = self.acoustic_LLM.transformer.wte
        bos_ids = torch.ones([B, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 1
                             device=device) * self.acoustic_LLM.config.bos_token_id
        bos_embeds = embed_tokens(bos_ids)  # torch.Size([17, 1, 1024])
        eos_ids_text = torch.ones([B, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 1
                                  device=device) * self.acoustic_LLM.config.eos_token_id_text
        eos_embeds_text = embed_tokens(eos_ids_text)  # torch.Size([17, 1, 1024])
        bos_ids_mel = torch.ones([B, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 1
                                 device=device) * self.acoustic_LLM.config.bos_token_id_mel
        bos_embeds_mel = embed_tokens(bos_ids_mel)  # torch.Size([17, 1, 1024])
        eos_ids = torch.ones([B, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 1
                             device=device) * self.acoustic_LLM.config.eos_token_id
        eos_embeds = embed_tokens(eos_ids)  # torch.Size([17, 1, 1024])

        # Teacher forcing: mel_in is the shifted acoustic input, mel_out
        # the prediction target.  NOTE(review): the exact shift/eos
        # semantics live in add_sos_eos4speech_llm — confirm alignment.
        mel_in, mel_out = add_sos_eos4speech_llm(acoustic_token, self.acoustic_LLM.config.bos_token_id_mel,
                                                 self.acoustic_LLM.config.eos_token_id, ignore_id=-100)
        mel_in_embeds = embed_tokens(mel_in)  # torch.Size([17, 13, 1024])
        text_embeds_B, text_embeds_T = text_embeds.size(0), text_embeds.size(1)
        text_ids = torch.ones([text_embeds_B, text_embeds_T], dtype=torch.long, device=text_embeds.device)
        concat_ids = torch.cat([bos_ids, text_ids, eos_ids_text], dim=1)
        # fill_() mutates concat_ids in place; that is fine because cat()
        # produced a fresh tensor.  The whole text prefix is masked from
        # the loss.
        filled_ids = concat_ids.fill_(-100)  # In CrossEntropyLoss(), ignore_index = -100
        # Input layout: <bos> text... <eos_text> <bos_mel> mel... <eos>
        embeds = torch.cat(
            [bos_embeds, text_embeds, eos_embeds_text, bos_embeds_mel, mel_in_embeds, eos_embeds], dim=1)
        labels = torch.cat([filled_ids, bos_ids_mel, mel_out], dim=1)
        utils_file.logging_limit_print('如下是输入little LLM的内容的shape')
        utils_file.logging_limit_print(f'embeds shape:{embeds.shape}')
        utils_file.logging_limit_print(f'labels shape:{labels.shape}')
        outputs_2 = self.acoustic_LLM.forward(
            inputs_embeds=embeds,
            labels=labels,
        )
        # Last-layer hidden states; keep only the positions that should
        # predict acoustic tokens (drop the trailing two positions).
        # NOTE(review): the slice below assumes mel_in_embeds has exactly
        # one more frame than acoustic_token (the mel-bos) — confirm
        # against add_sos_eos4speech_llm before trusting the top-k
        # accuracies computed on acoustic_token.
        mel_embeds = outputs_2['hidden_states'][-1]
        true_mel_embeds_num = mel_in_embeds.size(1) + 1 + 1
        true_mel_embeds = mel_embeds[:, -true_mel_embeds_num:-2, :]
        utils_file.logging_limit_print(f'true_mel_embeds shape:{true_mel_embeds.shape}')
        logits = self.acoustic_LLM.lm_head(true_mel_embeds)
        utils_file.logging_limit_print(f'logits shape:{logits.shape}')
        # Teacher-forced top-k accuracies; label 4096 is ignored
        # (presumably the padding id of the acoustic vocabulary — verify).
        acc_att_50 = top_k_accuracy(
            logits.view(-1, self.acoustic_LLM.config.vocab_size),
            acoustic_token,
            ignore_label=4096,
            k=50
        )
        acc_att_30 = top_k_accuracy(
            logits.view(-1, self.acoustic_LLM.config.vocab_size),
            acoustic_token,
            ignore_label=4096,
            k=30
        )
        acc_att_20 = top_k_accuracy(
            logits.view(-1, self.acoustic_LLM.config.vocab_size),
            acoustic_token,
            ignore_label=4096,
            k=20
        )
        acc_att_10 = top_k_accuracy(
            logits.view(-1, self.acoustic_LLM.config.vocab_size),
            acoustic_token,
            ignore_label=4096,
            k=10
        )
        acc_att_5 = top_k_accuracy(
            logits.view(-1, self.acoustic_LLM.config.vocab_size),
            acoustic_token,
            ignore_label=4096,
            k=5
        )
        acc_att_2 = top_k_accuracy(
            logits.view(-1, self.acoustic_LLM.config.vocab_size),
            acoustic_token,
            ignore_label=4096,
            k=2
        )
        acc_att_1 = top_k_accuracy(
            logits.view(-1, self.acoustic_LLM.config.vocab_size),
            acoustic_token,
            ignore_label=4096,
            k=1
        )
        loss = outputs_2['loss']  # 0-dim tensor (plain scalar)
        return {"loss": loss, f"acc_att_top_50": torch.tensor(acc_att_50), f"acc_att_top_30": torch.tensor(acc_att_30),
                f"acc_att_top_20": torch.tensor(acc_att_20), f"acc_att_top_10": torch.tensor(acc_att_10),
                f"acc_att_top_5": torch.tensor(acc_att_5), f"acc_att_top_2": torch.tensor(acc_att_2),
                f"acc_att_top_1": torch.tensor(acc_att_1)}

    def get_tokens_by_embeding(self, text_embedding, device):
        """Decode acoustic tokens for a single utterance.

        Args:
            text_embedding: text embeddings for one utterance — assumed
                shape (1, T, 4096); TODO confirm with callers.
            device: target device for the special-token tensors.

        Returns:
            str: space-joined generated acoustic-token ids, or implicitly
            None (after an error log) if the generated batch size is not 1.
        """
        text_embedding = text_embedding.to(torch.float32)
        text_embeds = self.llama_nano_proj(text_embedding)
        utils_file.logging_limit_print(f'text_embeds_feature after llama_nano_proj shape:{text_embeds.shape}')
        text_embeds, text_masks = self.speech_transformer_2(text_embeds,
                                                             torch.tensor([len(text_embeds[0])], device=device,
                                                                          dtype=torch.long))
        utils_file.logging_limit_print(f'text_embeds_feature after speech_transformer_2 shape:{text_embeds.shape}')

        # Special-token embeddings, mirroring forward() but with batch 1.
        embed_tokens = self.acoustic_LLM.transformer.wte
        bos_ids = torch.ones([1, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 1
                             device=device) * self.acoustic_LLM.config.bos_token_id
        bos_embeds = embed_tokens(bos_ids)  # torch.Size([17, 1, 1024])
        eos_ids_text = torch.ones([1, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 1
                                  device=device) * self.acoustic_LLM.config.eos_token_id_text
        eos_embeds_text = embed_tokens(eos_ids_text)  # torch.Size([17, 1, 1024])
        bos_ids_mel = torch.ones([1, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 1
                                 device=device) * self.acoustic_LLM.config.bos_token_id_mel
        bos_embeds_mel = embed_tokens(bos_ids_mel)  # torch.Size([17, 1, 1024])
        eos_ids = torch.ones([1, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 1
                             device=device) * self.acoustic_LLM.config.eos_token_id
        eos_embeds = embed_tokens(eos_ids)  # torch.Size([17, 1, 1024])

        # mel_in, mel_out = add_sos_eos4speech_llm(acoustic_token, model.acoustic_LLM.config.bos_token_id_mel,
        #                                          model.acoustic_LLM.config.eos_token_id, ignore_id=-100)
        # mel_in_embeds = embed_tokens(mel_in)  # torch.Size([17, 13, 1024])
        # NOTE(review): eos_embeds and filled_ids below are computed but
        # never used on this inference path.
        text_embeds_B, text_embeds_T = text_embeds.size(0), text_embeds.size(1)
        text_ids = torch.ones([text_embeds_B, text_embeds_T], dtype=torch.long, device=text_embeds.device)
        concat_ids = torch.cat([bos_ids, text_ids, eos_ids_text], dim=1)
        filled_ids = concat_ids.fill_(-100)  # In CrossEntropyLoss(), ignore_index = -100
        # Prompt layout: <bos> text... <eos_text> <bos_mel> — generation
        # continues from the mel-bos position.
        embeds = torch.cat(
            [bos_embeds, text_embeds, eos_embeds_text, bos_embeds_mel, ], dim=1)
        # labels = torch.cat([filled_ids, bos_ids_mel, mel_out], dim=1)
        utils_file.logging_limit_print('如下是输入little LLM的内容的shape')
        utils_file.logging_limit_print(f'embeds shape:{embeds.shape}')
        atts = torch.ones(embeds.size()[:-1], dtype=torch.long).to(embeds.device)
        utils_file.logging_limit_print(f'atts shape:{atts.shape}')
        utils_file.do_print_model_dtype(self.acoustic_LLM)
        # utils_file.logging_limit_print(f'labels shape:{labels.shape}')
        # NOTE(review): max_length is hard-coded here; the stored
        # self.max_length / num_beams / etc. are not used.
        outputs_2 = self.acoustic_LLM.generate(
            inputs_embeds=embeds,
            attention_mask=atts,
            max_length=2024,
            bos_token_id=self.acoustic_LLM.config.bos_token_id,
            eos_token_id=self.acoustic_LLM.config.eos_token_id,
            pad_token_id=self.acoustic_LLM.config.pad_token_id,
        )
        if outputs_2.size(0) == 1:
            temp = outputs_2[0].tolist()
            res = " ".join([str(x) for x in temp])
            return res
        else:
            # Falls through returning None — callers must handle that.
            logging.error('推理时的batch size不是1')

    def generate(
            self,
            batch,
            device,
    ):
        """Generate acoustic-token ids for a single-utterance text batch.

        Args:
            batch: dict with 'target' — text token ids, batch size must be 1.
            device: device to run on.

        Returns:
            str: space-joined acoustic-token ids from get_tokens_by_embeding.
        """
        text_batch = batch["target"]
        text_batch = text_batch.to(device)
        assert text_batch.size(0) == 1, "batch size should be 1"
        text_embeds = self.text2embedding(text_batch)
        token_ids = self.get_tokens_by_embeding(text_embeds, device)
        return token_ids

