import logging
import os

import torchaudio
import torch
from peft import LoraConfig, TaskType, get_peft_model
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

from wenet.transformer.encoder import TransformerEncoder
from wenet.llm_asr.utils4llmasr import *
from gxl_ai_utils.utils import utils_file

from wenet.llm_asr.downsampler import get_downsampler, LyzConv1dSubsampling
from wenet.utils.mask import make_pad_mask
from wenet.one_embedding.speech_encoder import SpeechEncoder


class LLMASR_Model(nn.Module):
    """Speech-to-text bridge between a speech encoder and a causal LLM.

    A small set of learned query vectors is appended to the text prompt and
    run through the LLM; the final hidden state(s) of those queries condition
    the speech encoder, whose output is then fed back into the LLM (together
    with the prompt KV-cache) to produce / score the transcription.
    """

    def __init__(self,
                 encoder,
                 llm_path,
                 query_num=1,
                 lora=True, lora_alpha=32, lora_rank=8, lora_dropout=0.1, is_inference=False):
        """Build the model.

        Args:
            encoder: speech encoder module, called as
                ``encoder(wavs, wavs_len, query_for_speech=...)``.
            llm_path: HuggingFace name/path of the causal-LM checkpoint.
            query_num: number of learned query vectors appended to the prompt.
            lora: wrap the LLM with LoRA adapters when True.
            lora_alpha: LoRA scaling factor.
            lora_rank: LoRA rank ``r``.
            lora_dropout: dropout applied inside the LoRA adapters.
            is_inference: build the LoRA config in inference mode.
        """
        super().__init__()

        self.encoder = encoder
        self.llama_model = AutoModelForCausalLM.from_pretrained(
            llm_path,
            torch_dtype=torch.bfloat16,
            trust_remote_code=True,
            output_hidden_states=True,
        )
        # Generation defaults (used by generate()).
        self.max_length = 400
        self.min_length = 1
        self.num_beams = 1
        self.do_sample = False
        self.top_p = 0.95
        self.top_k = 5
        self.repetition_penalty = 1.05
        self.length_penalty = 1.0
        self.temperature = 1.0
        # HF convention: label positions set to -100 are ignored by the CE loss.
        self.IGNORE_ID = -100

        # lora
        self.lora = lora
        if lora:
            utils_file.logging_limit_print("OSUM: 使用lora了")
            target_modules = ['q_proj', 'k_proj', 'v_proj', 'o_proj', 'gate_proj', 'down_proj']
            # The two original branches differed only in inference_mode.
            self.peft_config = LoraConfig(
                task_type=TaskType.CAUSAL_LM,
                inference_mode=is_inference,
                r=lora_rank,
                lora_alpha=lora_alpha,
                lora_dropout=lora_dropout,
                target_modules=target_modules,
            )
            self.llama_model = get_peft_model(self.llama_model, self.peft_config)

        # tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(
            llm_path, use_fast=False, trust_remote_code=True)
        # Configure the tokenizer's pad token and padding direction.
        # NOTE(review): '[PAD]' is registered as a new special token but the
        # LLM embedding matrix is never resized; confirm the id is never fed
        # to the model (here it is only used to pad label tensors).
        self.tokenizer.add_special_tokens({'pad_token': '[PAD]'})
        self.tokenizer.padding_side = "right"
        self.llm_embed_dim = self.llama_model.config.hidden_size
        # With LoRA the HF model is wrapped one level deeper (PeftModel.model).
        self.embed_tokens = (self.llama_model.model.model.embed_tokens
                             if self.lora else self.llama_model.model.embed_tokens)
        self.lm_head = (self.llama_model.model.lm_head
                        if self.lora else self.llama_model.lm_head)
        # Learned queries appended after the prompt; their last hidden states
        # are handed to the speech encoder as conditioning.
        self.gxl_query_vector = nn.Parameter(torch.zeros(1, query_num, self.llm_embed_dim))
        print("gxl_query_vector shape:", self.gxl_query_vector.shape)

    def get_label_embedding(self, labels, labels_lengths):
        """Embed padded label ids and build the matching CE target / mask.

        Args:
            labels: (B, L) int tensor of token ids, padded arbitrarily.
            labels_lengths: (B,) valid lengths.

        Returns:
            labels_embeds: (B, L, D) embeddings (pad positions embed id 0).
            labels_target: (B, L) ids with pad positions set to IGNORE_ID.
            labels_mask: (B, L) bool, True on valid positions.
        """
        labels_pad_mask = make_pad_mask(labels_lengths)  # B, L
        # Replace pad ids with 0 so the embedding lookup is always in-range.
        labels = labels.masked_fill(labels_pad_mask, 0)
        labels_embeds = self.embed_tokens(labels)
        labels_target = labels.masked_fill(labels_pad_mask, self.IGNORE_ID)  # B, L
        labels_mask = ~labels_pad_mask
        return labels_embeds, labels_target, labels_mask

    def get_embedding_from_wav(self, wavs, wavs_len, query_for_speech=None):
        """Run the speech encoder, optionally conditioned on LLM query states.

        Returns the encoder output (speech representation in the LLM
        embedding space); shape is determined by the encoder.
        """
        # Fix: dropped an unused `rank = int(os.environ.get('RANK', 0))`.
        query_vector_batch = self.encoder(wavs, wavs_len, query_for_speech=query_for_speech)
        return query_vector_batch

    def forward(self,
                batch,
                device,
                ):
        """Two-pass training step.

        Pass 1 feeds prompt + learned queries through the LLM to obtain a
        speech-conditioning query; pass 2 feeds the speech embedding + labels
        (reusing the pass-1 KV cache) and computes the CE loss.

        Returns:
            dict with key 'loss'.
        """
        output_type = batch['output_type']
        assert output_type in ['text',], f"output_type:{output_type} not support"
        wavs = batch['feats'].to(device)
        wavs_len = batch['feats_lengths'].to(device)

        prompt = batch['prompt'].to(device)
        prompt_lengths = batch['prompt_lengths'].to(device)
        prompt_pad_mask = make_pad_mask(prompt_lengths)  # B, L
        # Pad positions are embedded as EOS; they are masked out via attention.
        prompt = prompt.masked_fill(prompt_pad_mask, self.tokenizer.eos_token_id)
        prompt_embeds = self.embed_tokens(prompt)  # B, L, D
        catted_embeds = torch.cat(
            [prompt_embeds, self.gxl_query_vector.expand(prompt_embeds.shape[0], -1, -1)], dim=1)
        new_prompt_pad_mask = make_pad_mask(prompt_lengths + self.gxl_query_vector.shape[1])
        prompt_mask = ~new_prompt_pad_mask

        # Standard HF recipe: cumulative positions over valid tokens, dummy
        # position 1 on padded ones.
        position_ids = prompt_mask.long().cumsum(-1) - 1
        position_ids.masked_fill_(prompt_mask == 0, 1)
        outputs = self.llama_model(
            inputs_embeds=catted_embeds,
            # no labels here: no CE loss is computed on the prompt pass
            attention_mask=prompt_mask,
            output_hidden_states=True,
            position_ids=position_ids.to(catted_embeds.device)
        )
        hidden_states = outputs.hidden_states[-1]  # B, L, D
        # NOTE(review): only the LAST query state is taken here, while
        # generate() takes the last `num_query` states — confirm query_num==1
        # or align the two code paths.
        query_for_speech = hidden_states[:, -1:, :]  # B, 1, D
        query_carry_speech = self.get_embedding_from_wav(wavs, wavs_len, query_for_speech)
        query_target = torch.tensor([[self.IGNORE_ID]], device=device, dtype=torch.long).expand(
            [wavs.shape[0], query_for_speech.shape[1]])
        query_mask = torch.tensor([[True]], device=device, dtype=torch.bool).expand(
            [wavs.shape[0], query_for_speech.shape[1]])

        cache = outputs.past_key_values

        labels = batch['target'].to(device)
        labels_lengths = batch['target_lengths'].to(device)
        labels_embeds, labels_target, labels_mask = self.get_label_embedding(labels, labels_lengths)

        catted_embeds2 = torch.cat([query_carry_speech, labels_embeds], dim=1)
        catted_mask2 = torch.cat([query_mask, labels_mask], dim=1)
        catted_target2 = torch.cat([query_target, labels_target], dim=1)
        position_ids = catted_mask2.long().cumsum(-1) - 1
        position_ids.masked_fill_(catted_mask2 == 0, 1)
        # NOTE(review): with past_key_values the HF attention_mask is usually
        # expected to cover cached + new tokens; here it covers only the new
        # ones — verify the underlying model tolerates this.
        outputs2 = self.llama_model(
            inputs_embeds=catted_embeds2,
            labels=catted_target2,
            attention_mask=catted_mask2,
            position_ids=position_ids.to(catted_embeds2.device),
            past_key_values=cache,  # reuse the prompt pass, i.e. prompt is still visible to the LLM
        )
        loss = outputs2['loss']
        return {'loss': loss}

    def generate(
            self,
            wavs,
            wavs_len,
            prompt,
    ):
        """Greedy-ish decoding: prompt+queries pass, then generate from speech.

        Args:
            wavs: (B, T, F) speech features (B expected to be 1).
            wavs_len: (B,) feature lengths.
            prompt: prompt string, or the sentinel "<no_prompt>".

        Returns:
            list[str]: decoded hypotheses.
        """
        device = wavs.device
        num_query = self.gxl_query_vector.shape[1]
        if prompt != "<no_prompt>":
            prompt = self.tokenizer([prompt], return_tensors="pt"
                                    )['input_ids'].to(device)
            prompt_embeds = self.embed_tokens(prompt)
            catted_embeds = torch.cat(
                [prompt_embeds, self.gxl_query_vector.expand(prompt_embeds.shape[0], -1, -1)], dim=1)
            prompt_lengths = torch.tensor([prompt_embeds.shape[1]], device=device, dtype=torch.int32)
        else:
            # Fix: the original set prompt_embeds=None here and then crashed
            # on torch.cat / .shape below. With no prompt, feed only the
            # learned query vectors.
            catted_embeds = self.gxl_query_vector.expand(1, -1, -1)
            prompt_lengths = torch.tensor([0], device=device, dtype=torch.int32)
        new_prompt_pad_mask = make_pad_mask(prompt_lengths + num_query)
        prompt_mask = ~new_prompt_pad_mask

        position_ids = prompt_mask.long().cumsum(-1) - 1
        position_ids.masked_fill_(prompt_mask == 0, 1)
        outputs = self.llama_model(
            inputs_embeds=catted_embeds,
            # no labels: no CE loss during decoding
            attention_mask=prompt_mask,
            output_hidden_states=True,
            position_ids=position_ids.to(catted_embeds.device)
        )
        hidden_states = outputs.hidden_states[-1]  # B, L, D
        query_for_speech = hidden_states[:, -num_query:, :]  # B, num_query, D
        query_carry_speech = self.get_embedding_from_wav(wavs, wavs_len, query_for_speech)
        utils_file.logging_limit_print(f"query_carry_speech shape: {query_carry_speech.shape}")
        # NOTE(review): mask width is num_query — assumes the encoder returns
        # exactly num_query frames; confirm against the encoder.
        query_mask = torch.tensor([True] * num_query, device=device, dtype=torch.bool).expand(
            [wavs.shape[0], -1])

        outputs = self.llama_model.generate(
            inputs_embeds=query_carry_speech,
            max_new_tokens=self.max_length,
            min_length=self.min_length,
            repetition_penalty=self.repetition_penalty,
            length_penalty=self.length_penalty,
            temperature=self.temperature,
            attention_mask=query_mask,
            # NOTE(review): hardcoded Qwen EOS id and a negative pad id —
            # consider self.tokenizer.eos_token_id / pad_token_id instead.
            eos_token_id=151643,
            pad_token_id=-100,
        )

        output_text = self.tokenizer.batch_decode(outputs, add_special_tokens=False, skip_special_tokens=True)

        return output_text

    def get_embedding_from_text(self, text):
        """Tokenize `text` (no special tokens) and return its LLM embeddings."""
        text_id = self.tokenizer(
            text,
            return_tensors="pt",
            add_special_tokens=False
        ).to(
            self.embed_tokens.weight.device).input_ids
        text_embeds = self.embed_tokens(text_id)
        return text_embeds

    def get_embeds_from_wav_path(self, wav_path):
        """Load a wav file and return its speech embedding (batch of 1)."""
        waveform_i2, _ = torchaudio.load(wav_path)
        if len(waveform_i2.shape) != 1:
            # torchaudio returns (channels, time); keep the first channel.
            waveform_i2 = waveform_i2[0]
        waveform_i2 = waveform_i2.to(self.embed_tokens.weight.device)
        wavs_len_i2 = torch.tensor([len(waveform_i2)], device=self.embed_tokens.weight.device, dtype=torch.int32)
        wavs_i2 = waveform_i2.unsqueeze(0)
        sample_i2_embeds = self.get_embedding_from_wav(wavs_i2, wavs_len_i2)
        return sample_i2_embeds

    def _add_bos_eos(self, bos, eos, inputs_embeds, attention_mask, target=None):
        """Prepend/append BOS/EOS speech-token embeddings to a sequence.

        Either id may be None to skip that side. The matching attention mask
        (True) and target (IGNORE_ID) columns are added alongside.

        NOTE(review): relies on self.speech_token_emded, which is not defined
        in this class's __init__ — presumably set by a subclass or externally;
        verify before calling.
        """
        B = len(inputs_embeds)
        bos_eos_target = torch.full([B, 1], self.IGNORE_ID).to(inputs_embeds.device)  # B,1
        bos_eos_mask = torch.full([B, 1], True).to(inputs_embeds.device)  # B, 1

        if bos is not None:
            bos_embed = self.speech_token_emded(torch.full([B, 1],
                                                           bos).to(inputs_embeds.device))  # B, 1, D
            inputs_embeds = torch.cat((bos_embed, inputs_embeds), 1)  # B, (1+T), D
            attention_mask = torch.cat((bos_eos_mask, attention_mask), 1)  # B, (1+T)
            if target is not None:
                target = torch.cat((bos_eos_target, target), 1)  # B, (1+T), D

        if eos is not None:
            eos_embed = self.speech_token_emded(torch.full([B, 1],
                                                           eos).to(inputs_embeds.device))  # B, 1, D
            inputs_embeds = torch.cat((inputs_embeds, eos_embed), 1)  # B, (1+T+1), D
            attention_mask = torch.cat((attention_mask, bos_eos_mask), 1)  # B, (1+T+1)
            if target is not None:
                target = torch.cat((target, bos_eos_target), 1)  # B, (1+T+1), D

        return inputs_embeds, attention_mask, target

