import logging
import sys

import torch
from peft import LoraConfig, TaskType, get_peft_model
from torch import nn
from transformers import AutoModelForCausalLM, AutoTokenizer

from wenet.utils.common import add_sos_eos4speech_llm

sys.path.append('../../')


class LlmFtWrapper(nn.Module):
    """Wrapper around a causal LLM for (speech-)LLM fine-tuning.

    Loads a pretrained causal language model in fp16 together with its
    tokenizer, and optionally injects LoRA adapters for
    parameter-efficient fine-tuning.

    Args:
        llm_path: HuggingFace model id or local checkpoint directory.
        lora: if True, wrap the base model with LoRA adapters via PEFT.
        lora_alpha: LoRA scaling factor.
        lora_rank: rank ``r`` of the LoRA decomposition.
        lora_dropout: dropout applied inside the LoRA layers.
        is_inference: if True, build the LoRA config in inference mode
            (adapters frozen); otherwise in training mode.
    """

    def __init__(self, llm_path,
                 lora=False,
                 lora_alpha=32,
                 lora_rank=8,
                 lora_dropout=0.1,
                 is_inference=False):
        super().__init__()
        self.llm_path = llm_path
        self.llama_model = AutoModelForCausalLM.from_pretrained(
            llm_path, trust_remote_code=True,
            torch_dtype=torch.float16,
        )
        self.lora = lora
        if lora:
            # Original (Chinese) debug log, kept verbatim: "<author>: LoRA enabled".
            logging.info("耿雪龙： 使用lora了")
            # Modules to receive LoRA adapters. 'W_pack' targets fused
            # QKV projections (Baichuan-style models) — TODO confirm the
            # target model actually exposes these module names.
            target_modules = ['W_pack', 'o_proj', 'gate_proj', 'down_proj']
            # The original code duplicated the whole LoraConfig in two
            # branches that differed only in `inference_mode`; one config
            # parameterized by `is_inference` is equivalent.
            self.peft_config = LoraConfig(
                task_type=TaskType.CAUSAL_LM,
                inference_mode=is_inference,
                r=lora_rank,
                lora_alpha=lora_alpha,
                lora_dropout=lora_dropout,
                target_modules=target_modules,
            )
            self.llama_model = get_peft_model(self.llama_model, self.peft_config)

        self.llama_tokenizer = AutoTokenizer.from_pretrained(
            llm_path, use_fast=False, trust_remote_code=True)
        # Configure the tokenizer's pad token and padding direction.
        self.llama_tokenizer.add_special_tokens({'pad_token': '[PAD]'})
        self.llama_tokenizer.padding_side = "right"

    def forward(self,
                batch,
                device):
        """Compute the causal-LM loss over the target token sequence.

        Args:
            batch: dict with at least
                'target': padded label token ids — presumably (B, T);
                    TODO confirm against the dataloader.
                'target_lengths': valid lengths per utterance (B,).
            device: device the tensors are moved to.

        Returns:
            dict with key ``"loss"`` holding a 0-dim (scalar) tensor.
        """
        labels = batch['target'].to(device)
        # NOTE(review): `labels_len` is never used below — dead load, or
        # intended for masking that was never implemented? Verify.
        labels_len = batch['target_lengths'].to(device)
        # The embedding table sits one `.model` level deeper when the base
        # model is wrapped by PEFT (get_peft_model).
        embed_tokens = self.llama_model.model.model.embed_tokens if self.lora \
            else self.llama_model.model.embed_tokens
        labels_ids = labels
        # Project helper: builds the shifted input/output label sequences
        # (teacher forcing), padding ignored positions with -100 so the LM
        # loss skips them — semantics assumed from the name; confirm in
        # wenet.utils.common.
        labels_in_ids, labels_out_ids = add_sos_eos4speech_llm(
            labels_ids, self.llama_tokenizer.bos_token_id,
            self.llama_tokenizer.eos_token_id, ignore_id=-100)
        labels_in_embeds = embed_tokens(labels_in_ids)
        batch_size = labels.shape[0]
        # (B, 1) columns holding the bos / eos token ids.
        bos_ids = torch.ones([batch_size, 1], dtype=torch.long,
                             device=device) * self.llama_tokenizer.bos_token_id
        bos_embeds = embed_tokens(bos_ids)
        eos_ids = torch.ones([batch_size, 1], dtype=torch.long,
                             device=device) * self.llama_tokenizer.eos_token_id
        eos_embeds = embed_tokens(eos_ids)
        # Input to the LM: <bos> + shifted-in labels + <eos>, as embeddings.
        embeds = torch.cat(
            [bos_embeds, labels_in_embeds, eos_embeds], dim=1)
        # NOTE(review): `embeds` has sequence length len(labels_in)+2 while
        # `labels` below has len(labels_out)+1 — these must be equal for the
        # HF loss computation; verify add_sos_eos4speech_llm keeps them aligned.
        labels = torch.cat([bos_ids, labels_out_ids], dim=1)
        outputs = self.llama_model(
            inputs_embeds=embeds,
            labels=labels,
        )
        loss = outputs['loss']  # 0-dim scalar tensor
        return {"loss": loss}

    def save_self(self, paths):
        """Save the (possibly LoRA-wrapped) model weights under `paths`."""
        self.llama_model.save_pretrained(paths)
