import logging
import os
import sys

sys.path.append('../../')
import torch
from peft import LoraConfig, TaskType, get_peft_model
from torch import nn
from transformers import AutoModelForCausalLM, AutoTokenizer

from wenet.transformer.encoder import TransformerEncoder
from wenet.utils.common import add_sos_eos, add_sos_eos4speech_llm
from wenet.utils.gxl_utils import Whisper_Utils
import soundfile as sf


class Salmonn_Model(nn.Module):
    def __init__(self, encoder, llm_path, speech_qformer_token_num=1, speech_qformer_layer=2, lora=True, lora_alpha=32,
                 lora_rank=8, lora_dropout=0.1, second_per_frame=0.333333, second_stride=0.333333, low_resource=False,
                 prompt_pattern="USER: <Speech><SpeechHere></Speech> {}\nASSISTANT:",
                 llama_model_generate_max_length=200, llama_model_generate_min_length=1,
                 llama_model_generate_num_beams=4, llama_model_generate_do_sample=True, llama_model_generate_top_p=0.9,
                 llama_model_generate_repetition_penalty=1.0, llama_model_generate_length_penalty=1.0,
                 llama_model_generate_temperature=1.0, load_epoch_ckpt=False, load_step_ckpt=False,
                 load_eval_ckpt=False, ckpt_path="", *args, **kwargs):
        """"""
        super().__init__(*args, **kwargs)
        # whisper
        self.speech_encoder = encoder
        # self.speech_encoder.eval()
        """
        hubert的dim是1024， whisper的dim的1280， 通过线性层转换
        """
        self.hubert_dim2whisper_dim = nn.Linear(encoder.output_size(), 1280)
        self.ln_speech = nn.LayerNorm(1280)

        # 连接层, 51.6M
        self.speech_transformer = TransformerEncoder(
            input_size=1280,
            output_size=1280,
            attention_heads=4,
            linear_units=2560,
            num_blocks=4,
            dropout_rate=0.1,
            positional_dropout_rate=0.1,
            attention_dropout_rate=0.0,
            input_layer="linear",
            pos_enc_layer_type="abs_pos",
            normalize_before=True
        )

        # LLM,
        # Atom-7B , 6,684.066406M 参数, llama-7b
        """
        llama-7b 模型介绍：
        特征维度： 4096，
        词数： 65000
        encoder_layers_num: 32
        model_size: 6684M 
        """
        if not low_resource:
            self.llama_model = AutoModelForCausalLM.from_pretrained(
                llm_path,
                torch_dtype=torch.float16,
            )
        else:
            self.llama_model = AutoModelForCausalLM.from_pretrained(
                llm_path,
                torch_dtype=torch.float16,
                load_in_8bit=True,
                device_map="auto"
            )

        self.max_length = llama_model_generate_max_length
        self.min_length = llama_model_generate_min_length
        self.num_beams = llama_model_generate_num_beams
        self.do_sample = llama_model_generate_do_sample
        self.top_p = llama_model_generate_top_p
        self.repetition_penalty = llama_model_generate_repetition_penalty
        self.length_penalty = llama_model_generate_length_penalty
        self.temperature = llama_model_generate_temperature
        self.load_epoch_ckpt = load_epoch_ckpt
        self.load_step_ckpt = load_step_ckpt
        self.load_eval_ckpt = load_eval_ckpt

        # lora
        self.lora = lora
        if lora:
            if self.load_eval_ckpt:
                target_modules = None
                self.peft_config = LoraConfig(
                    task_type=TaskType.CAUSAL_LM,
                    inference_mode=True,
                    r=lora_rank,
                    lora_alpha=lora_alpha,
                    lora_dropout=lora_dropout,
                    target_modules=target_modules,
                )
            else:
                target_modules = None
                self.peft_config = LoraConfig(
                    task_type=TaskType.CAUSAL_LM,
                    inference_mode=False,
                    r=lora_rank,
                    lora_alpha=lora_alpha,
                    lora_dropout=lora_dropout,
                    target_modules=target_modules,
                )
            self.llama_model = get_peft_model(self.llama_model, self.peft_config)

        # tokenizer
        # self.llama_tokenizer = LlamaTokenizer.from_pretrained(llm_path, use_fast=False)
        self.llama_tokenizer = AutoTokenizer.from_pretrained(llm_path, use_fast=False)
        """
        设置分词器的pad_token和padding的方向。
        """
        self.llama_tokenizer.add_special_tokens({'pad_token': '[PAD]'})
        self.llama_tokenizer.padding_side = "right"

        # 中间层与LLM的耦合口：
        self.speech_llama_proj = nn.Linear(
            1280, self.llama_model.config.hidden_size)

        self.prompt_pattern = prompt_pattern
        self.LLM_out_proj = nn.Linear(4096, 65000)
        self.ce_loss = torch.nn.CrossEntropyLoss()

        # load checkpoint
        self.ckpt_path = ckpt_path
        if self.load_epoch_ckpt or self.load_step_ckpt or self.load_eval_ckpt:
            checkpoint = torch.load(self.ckpt_path, map_location=self.llama_model.device)
            self.load_state_dict(checkpoint, strict=False)
            logging.info(f"Checkpoint {self.ckpt_path} has been loaded.")

    def forward(self,
                wavs,
                wavs_len,
                prompt,
                labels):
        """"""
        """
        首先 得到音频编码的特征
        speech_embeds ： 为输入LLM的音频编码特征， 已经对齐特征维度。 shape:(b, t, 4096)
        """
        speech_embeds, speech_lens = self.speech_encoder(wavs, wavs_len)
        speech_embeds = self.hubert_dim2whisper_dim(speech_embeds)
        speech_embeds = self.ln_speech(speech_embeds)  # 特征维度： 1280
        B, T, C = speech_embeds.shape
        speech_embeds, speech_masks = self.speech_transformer(speech_embeds, speech_lens)
        speech_embeds = self.speech_llama_proj(speech_embeds)

        """
        接着处理prompt， 将其首先使用分词器编码成数字序列shape(1,N), 接着使用LLM的Embedding层对其进行编码shape(1,N, 4096)
        embed_tokens： nn.Embedding(65000, 4096). 
        知识补充：在模型的输入中，可以选择是否在序列的开头和结尾添加一些特殊的token，如CLS、SEP等，
        以适应模型的要求。add_special_tokens=False 表示不添加特殊token
        """
        # prompt-> :  USER: <Speech>speech_embeds</Speech> prompt\nASSISTANT:
        # embed_tokens-> ： nn.Embedding(65000, 4096)
        embed_tokens = self.llama_model.model.model.embed_tokens if self.lora else self.llama_model.model.embed_tokens
        prompt_left, prompts_right = self.prompt_pattern.format(prompt).split(
            '<SpeechHere>')
        # prompt_left: USER: <Speech>
        # prompts_right: </Speech> Describe the speech.\nASSISTANT:
        prompt_left_ids = self.llama_tokenizer(  # shape: [1, 7]
            prompt_left,
            return_tensors="pt",
            add_special_tokens=False
        ).to(speech_embeds.device).input_ids
        # tensor([[ 3148,  1001, 29901,   529, 10649,  5309, 29958]], device='cuda:0')
        prompt_left_embeds = embed_tokens(prompt_left_ids).repeat_interleave(B, dim=0)  # torch.Size([17, 7, 4096])
        prompt_left_ids = prompt_left_ids.repeat_interleave(B, dim=0)  # torch.Size([17, 7])

        prompt_right_ids = self.llama_tokenizer(
            prompts_right,
            return_tensors="pt",
            add_special_tokens=False
        ).to(speech_embeds.device).input_ids
        prompt_right_embeds = embed_tokens(prompt_right_ids).repeat_interleave(B, dim=0)  # torch.Size([17, 14, 4096])
        prompt_right_ids = prompt_right_ids.repeat_interleave(B, dim=0)  # torch.Size([17, 14])

        """
        处理labels, labels本本身是已经padding过的，shape:(B , T)
        首先对其经过sos_eos处理， 得到两个padded_labels_in和padded_labels_out,
        labels_in不加入bos 
        然后使用Embedding层对padded_labels_in进行编码
        接着得到bos_ids和bos_embeds, eos_ids和eos_embeds.shape: (B,1),  (B,1, 4096)
        """
        labels_ids = labels  # torch.Size([17, 13])
        labels_in, labels_out = add_sos_eos4speech_llm(labels_ids, self.llama_tokenizer.bos_token_id,
                                                       self.llama_tokenizer.eos_token_id, ignore_id=-100)
        labels_in_embeds = embed_tokens(labels_in)  # torch.Size([17, 13, 4096])
        bos_ids = torch.ones([B, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 1
                             device=speech_embeds.device) * self.llama_tokenizer.bos_token_id
        bos_embeds = embed_tokens(bos_ids)  # torch.Size([17, 1, 4096])

        eos_ids = torch.ones([B, 1], dtype=torch.long,  # torch.Size([17, 1]), true value is 2
                             device=speech_embeds.device) * self.llama_tokenizer.eos_token_id
        eos_embeds = embed_tokens(eos_ids)

        """
        将左prompt 音频 右prompt label_in 的高纬特征拼接在一起。
        将左prompt 音频 右prompt label_out 的id拼接在一起作为ground truth
        """
        speech_embeds_B, speech_embeds_T = speech_embeds.size(0), speech_embeds.size(1)
        speech_ids = torch.ones([speech_embeds_B, speech_embeds_T], dtype=torch.long, device=speech_embeds.device)
        concat_ids = torch.cat([bos_ids, prompt_left_ids, speech_ids, prompt_right_ids], dim=1)
        filled_ids = concat_ids.fill_(-100)  # In CrossEntropyLoss(), ignore_index = -100
        embeds = torch.cat(
            [bos_embeds, prompt_left_embeds, speech_embeds, prompt_right_embeds, labels_in_embeds, eos_embeds], dim=1)
        labels = torch.cat([filled_ids, labels_out], dim=1)
        outputs = self.llama_model(
            inputs_embeds=embeds,
            labels=labels,
        )
        loss = outputs['loss']  # 0维张量，纯数字
        return {"loss": loss}

    def generate(
            self,
            wavs,
            wavs_len,
            prompt,
    ):
        # import pdb;pdb.set_trace()
        speech_embeds, speech_lens = self.speech_encoder(wavs, wavs_len)
        speech_embeds = self.hubert_dim2whisper_dim(speech_embeds)
        B, T, C = speech_embeds.shape
        speech_embeds, speech_masks = self.speech_transformer(speech_embeds, speech_lens)
        speech_embeds = self.speech_llama_proj(speech_embeds)

        # USER: <Speech>speech_embeds<Speech> prompt\nASSISTANT:
        embed_tokens = self.llama_model.model.model.embed_tokens if self.lora else self.llama_model.model.embed_tokens
        prompt_left, prompts_right = self.prompt_pattern.format(prompt).split(
            '<SpeechHere>')  # prompt_left: 'USER: <Speech>', prompt_right: '</Speech> Describe the speech.\nASSISTANT:'
        prompt_left_ids = self.llama_tokenizer(
            prompt_left,
            return_tensors="pt",
            add_special_tokens=False
        ).to(
            speech_embeds.device).input_ids  # tensor([[ 3148,  1001, 29901,   529, 10649,  5309, 29958]], device='cuda:0')
        prompt_left_embeds = embed_tokens(prompt_left_ids)  # torch.Size([1, 7, 4096])
        prompt_right_ids = self.llama_tokenizer(
            prompts_right,
            return_tensors="pt",
            add_special_tokens=False
        ).to(
            speech_embeds.device).input_ids  # tensor([[ 1533, 10649,  5309, 29958, 20355,   915,   278, 12032, 29889,    13, 22933,  9047, 13566, 29901]], device='cuda:0')
        prompt_right_embeds = embed_tokens(prompt_right_ids)  # torch.Size([1, 14, 4096])

        bos_embeds = self.llama_model.model.embed_tokens(
            torch.ones(
                [B, 1],
                dtype=torch.long,
                device=speech_embeds.device,
            ) * self.llama_tokenizer.bos_token_id
        ) if not self.lora else self.llama_model.model.model.embed_tokens(
            torch.ones(
                [B, 1],
                dtype=torch.long,
                device=speech_embeds.device,
            ) * self.llama_tokenizer.bos_token_id
        )  # torch.Size([1, 14, 4096])

        embeds = torch.cat([bos_embeds, prompt_left_embeds, speech_embeds, prompt_right_embeds], dim=1)
        atts = torch.ones(embeds.size()[:-1], dtype=torch.long).to(embeds.device)
        # import pdb;pdb.set_trace()
        # generate
        # peft/peft_model.py(726)generate()
        outputs = self.llama_model.generate(
            inputs_embeds=embeds,
            max_length=self.max_length,
            num_beams=self.num_beams,
            do_sample=self.do_sample,
            min_length=self.min_length,
            top_p=self.top_p,
            repetition_penalty=self.repetition_penalty,
            length_penalty=self.length_penalty,
            temperature=self.temperature,
            attention_mask=atts,
            bos_token_id=self.llama_tokenizer.bos_token_id,
            eos_token_id=self.llama_tokenizer.eos_token_id,
            pad_token_id=self.llama_tokenizer.pad_token_id,
        )

        output_text = self.llama_tokenizer.batch_decode(outputs, add_special_tokens=False, skip_special_tokens=True)

        return output_text


if __name__ == '__main__':
    # Smoke test: build the model, generate a response for one utterance,
    # then run a forward pass with random labels.
    # Configure logging so logging.info() below is actually emitted
    # (the root logger defaults to WARNING otherwise).
    logging.basicConfig(level=logging.INFO)
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7"
    device = torch.device("cuda:7")
    model = Salmonn_Model(
        encoder=Whisper_Utils.load_whisper('tiny')[0].encoder,
        llm_path='/home/local_data/vicuna-7b-v1.5',
    )
    model.to(device)
    model.eval()
    # NOTE(review): wav/wav_len are left on CPU while the model sits on
    # cuda:7 — confirm the encoder moves inputs to its own device.
    wav = sf.read("/home/work_nfs/common/data/data_aishell/wav/test/S0764/BAC009S0764W0121.wav")[0]
    wav = torch.tensor(wav)
    wav_len = torch.tensor([wav.size(0)])
    prompt = 'Describe the speech.'
    with torch.no_grad():
        output = model.generate(wav, wav_len, prompt)
        logging.info('output:%s', output)

    labels = torch.randint(100, 10000, (1, 10))
    loss = model.forward(wav, wav_len, prompt, labels)
    print(loss)
