import logging
import os
from typing import Dict, List, Optional, Union
import torchaudio
import torch
from peft import LoraConfig, TaskType, get_peft_model
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

from wenet.transformer.ctc import CTC, GxlCTCMapper
from wenet.transformer.encoder import TransformerEncoder, TransformerEncoder2
from wenet.llm_asr.utils4llmasr import *
from gxl_ai_utils.utils import utils_file

from wenet.llm_asr.downsampler import get_downsampler, LyzConv1dSubsampling
from wenet.transformer.swish import New_gelu4npu
from wenet.utils.mask import make_pad_mask
import torch.nn.functional as F
import math
import gc


class ModelOnlyCtc(nn.Module):
    """CTC-only speech model: encoder -> downsampler -> adapter -> CTC head.

    Pipeline: acoustic ``encoder`` output is temporally downsampled, passed
    through a small Transformer adapter, projected into the LLM embedding
    space, and scored by a CTC head whose output projection is initialized
    from a pretrained LLM ``lm_head`` (LLM vocabulary = CTC label space).
    """

    # Original hard-coded checkpoint location, kept as the default value of
    # ``lm_head_ckpt_path`` so existing callers behave identically.
    DEFAULT_LM_HEAD_CKPT = (
        "/home/work_nfs16/xlgeng/code/osum_xlgeng_3B/examples/wenetspeech/"
        "only_ctc/datahandle/lm_head.pt"
    )

    def __init__(self,
                 encoder,
                 encoder_output_dim,
                 llm_path,
                 downsample_rate: int = 4,
                 lm_head_ckpt_path: Optional[str] = None,
                 ):
        """Build the CTC-only model.

        Args:
            encoder: acoustic encoder; ``encoder(wavs, wavs_len)`` must
                return ``(encoder_out, encoder_mask)``.
            encoder_output_dim: feature dimension of the encoder output.
            llm_path: path of the backing LLM. Accepted for interface
                compatibility but not used by this CTC-only variant.
            downsample_rate: temporal downsampling factor applied to the
                encoder output before the adapter.
            lm_head_ckpt_path: path to the pretrained ``lm_head`` state dict
                used to initialize the CTC output projection. Defaults to the
                historical hard-coded path (``DEFAULT_LM_HEAD_CKPT``).
        """
        super().__init__()
        utils_file.logging_limit_print(f"耿雪龙： 开始构建ModelOnlyCtc模型")
        self.downsample_rate = downsample_rate

        self.encoder = encoder
        self.ln_speech = nn.LayerNorm(encoder_output_dim)
        # Small Transformer adapter on top of the downsampled encoder output.
        self.speech_transformer = TransformerEncoder(
            input_size=encoder_output_dim,
            output_size=encoder_output_dim,
            attention_heads=4,
            linear_units=2560,
            num_blocks=4,
            dropout_rate=0.1,
            positional_dropout_rate=0.1,
            attention_dropout_rate=0.0,
            input_layer="linear",
            pos_enc_layer_type="abs_pos",
            normalize_before=True
        )
        # Target LLM geometry: hidden size 2048, vocabulary 151936
        # (presumably a Qwen-family model — TODO confirm against llm_path).
        self.llm_dim = 2048
        self.llm_vocab_size = 151936
        self.ctc_linear = nn.Linear(self.llm_dim, self.llm_vocab_size, bias=False)
        # Initialize the CTC output projection from the pretrained lm_head.
        # NOTE(review): consider torch.load(..., weights_only=True) for a
        # plain tensor state dict — verify against the torch version in use.
        if lm_head_ckpt_path is None:
            lm_head_ckpt_path = self.DEFAULT_LM_HEAD_CKPT
        self.ctc_linear.load_state_dict(
            torch.load(lm_head_ckpt_path, map_location='cpu'))
        print(f"耿雪龙： 加载ctc_linear权重成功","weight:")
        print(self.ctc_linear.weight)
        self.ctc = GxlCTCMapper(self.ctc_linear, dropout_rate=0.1)
        self.down_sample_2 = get_downsampler(downsample_rate, encoder_output_dim)
        self.speech_llama_proj = nn.Linear(encoder_output_dim, self.llm_dim)
        # Log parameter counts only on rank 0 to avoid duplicate output
        # under distributed training.
        rank = int(os.environ.get('RANK', 0))
        if rank == 0:
            utils_file.logging_info("ctc_linear:")
            utils_file.print_model_size(self.ctc_linear)
            utils_file.logging_info("encoder:")
            utils_file.print_model_size(self.encoder)
            utils_file.logging_info("speech_transformer:")
            utils_file.print_model_size(self.speech_transformer)
            utils_file.logging_info("speech_llama_proj:")
            utils_file.print_model_size(self.speech_llama_proj)

    def _get_embedding_from_wav(self, wavs, wavs_len):
        """Encode audio into LLM-space embeddings.

        Runs the acoustic encoder, downsamples, applies the Transformer
        adapter, and projects to the LLM embedding dimension.

        Args:
            wavs: batched acoustic features accepted by ``self.encoder``.
            wavs_len: per-utterance valid lengths of ``wavs``.

        Returns:
            tuple: ``(speech_embeds, speech_embeds_lengths)`` where
            ``speech_embeds`` is ``(B, T', llm_dim)`` and
            ``speech_embeds_lengths`` is the per-utterance number of valid
            frames after downsampling (NOT a boolean mask).
        """
        encoder_out, encoder_mask = self.encoder(wavs, wavs_len)
        speech_embeds, encoder_mask = self.down_sample_2(encoder_out, encoder_mask)
        # Recompute valid lengths from the post-downsampling mask.
        filled_wavs_len = encoder_mask.squeeze(1).sum(-1)
        speech_embeds, encoder_mask = self.speech_transformer(speech_embeds, filled_wavs_len)
        speech_embeds = self.speech_llama_proj(speech_embeds)
        mask = encoder_mask.squeeze(1)  # (B, T'), True at valid positions
        speech_embeds_lengths = mask.sum(-1)
        return speech_embeds, speech_embeds_lengths

    def forward(self,
                batch,
                device,
                ):
        """Compute the CTC training loss for one batch.

        Args:
            batch: dict with ``feats``, ``feats_lengths``, ``target`` and
                ``target_lengths`` tensors.
            device: device to move the batch tensors to.

        Returns:
            dict: ``{"loss": ctc_loss}``.
        """
        feats = batch['feats'].to(device)
        feats_lengths = batch['feats_lengths'].to(device)
        target = batch['target'].to(device)
        target_lengths = batch['target_lengths'].to(device)
        speech_embeds, speech_lens = self._get_embedding_from_wav(feats, feats_lengths)
        ctc_loss, _ = self.ctc(speech_embeds, speech_lens, target, target_lengths)
        return {"loss": ctc_loss}

    def generate(
            self,
            wavs,
            wavs_len,
            prompt,
    ):
        """Decode audio with the LLM using the given text prompt.

        NOTE(review): this method references attributes that are never set
        in ``__init__`` of this CTC-only class (``speech_token_num``,
        ``_add_bos_eos``, ``tokenizer``, ``embed_tokens``, ``llama_model``,
        ``max_length`` and other generation hyper-parameters). As written it
        will raise ``AttributeError`` — it appears copied from a fuller
        LLM-ASR model; confirm intent with the author before relying on it.
        """
        speech_embeds, speech_masks = self._get_embedding_from_wav(wavs, wavs_len)
        speech_embeds, speech_masks, _ = self._add_bos_eos(0 + self.speech_token_num, 1 + self.speech_token_num,
                                                           speech_embeds, speech_masks, None)
        prompt = self.tokenizer([prompt], return_tensors="pt"
                                )['input_ids'].to(speech_embeds.device)
        prompt_embeds = self.embed_tokens(prompt)

        # Qwen chat template: system header + user turn opener.
        qwen_instruct_prompt_pattern_1 = "<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n<|im_start|>user\n"
        prompt_pattern1 = self.tokenizer([qwen_instruct_prompt_pattern_1] * len(wavs_len), return_tensors="pt"
                                         )['input_ids'].to(speech_embeds.device)
        prompt_pattern1_embeds = self.embed_tokens(prompt_pattern1)

        # Close the user turn and open the assistant turn.
        qwen_instruct_prompt_pattern_2 = "<|im_end|>\n<|im_start|>assistant\n"
        prompt_pattern2 = self.tokenizer([qwen_instruct_prompt_pattern_2] * len(wavs_len), return_tensors="pt"
                                         )['input_ids'].to(speech_embeds.device)
        prompt_pattern2_embeds = self.embed_tokens(prompt_pattern2)

        # Interleave: [system+user opener][text prompt][speech][assistant opener]
        embeds = torch.cat([prompt_pattern1_embeds, prompt_embeds, speech_embeds, prompt_pattern2_embeds], dim=1)
        atts = torch.ones(embeds.size()[:-1], dtype=torch.long).to(embeds.device)

        if self.embed_tokens.weight.dtype == torch.float16 or self.embed_tokens.weight.dtype == torch.bfloat16:
            # Match the LLM's half-precision dtype.
            # NOTE(review): casting the attention mask to bfloat16 is
            # unusual — HF generate expects an integer/bool mask; verify.
            embeds = embeds.to(torch.bfloat16)
            atts = atts.to(torch.bfloat16)
        outputs = self.llama_model.generate(
            inputs_embeds=embeds,
            max_new_tokens=self.max_length,
            num_beams=self.num_beams,
            do_sample=self.do_sample,
            min_length=self.min_length,
            top_p=self.top_p,
            top_k=self.top_k,
            repetition_penalty=self.repetition_penalty,
            length_penalty=self.length_penalty,
            temperature=self.temperature,
            attention_mask=atts,
            eos_token_id=self.eos_token_id,
            # NOTE(review): pad_token_id=-100 is a loss-ignore index, not a
            # valid token id — confirm this is intentional.
            pad_token_id=-100,
        )

        output_text = self.tokenizer.batch_decode(outputs, add_special_tokens=False, skip_special_tokens=True)
        return output_text

