import torch
import soundfile as sf
import torch.nn as nn
import torch.nn.functional as F
from peft import LoraConfig, TaskType, get_peft_model
from transformers import (
    WhisperFeatureExtractor,
    WhisperModel,
    LlamaForCausalLM,
    LlamaTokenizer,
    AutoModelForCausalLM,
    AutoTokenizer
)
from wenet.transformer.whisper_encoder import OpenAIWhisperEncoder
import soundfile as sf
import logging
# from beats.BEATs import BEATsConfig, BEATs
from wenet.salmonn.qformer.Qformer import BertConfig, BertLMHeadModel
from wenet.utils.common import add_sos_eos
from wenet.transformer.encoder import TransformerEncoder

class SALMONN(nn.Module):
    """SALMONN-style speech-to-text model bridging a speech encoder and a causal LLM.

    Pipeline: speech encoder -> Linear(1024, 1280) adapter -> LayerNorm ->
    4-block Transformer -> Linear(1280, llm_hidden) projection.  The projected
    speech frames are spliced into the templated text prompt (at the
    ``<SpeechHere>`` placeholder) and fed to the (optionally LoRA-wrapped) LLM,
    either for teacher-forced training (:meth:`forward`) or for decoding
    (:meth:`generate`).
    """

    def __init__(
        self,
        encoder,
        llm_path,
        speech_qformer_token_num=1,
        speech_qformer_layer=2,
        lora=True,
        lora_alpha=32,
        lora_rank=8,
        lora_dropout=0.1,
        second_per_frame=0.333333,
        second_stride=0.333333,
        low_resource=False,
        prompt_pattern="USER: <Speech><SpeechHere></Speech> {}\nASSISTANT:",
        llama_model_generate_max_length=200,
        llama_model_generate_min_length=1,
        llama_model_generate_num_beams=4,
        llama_model_generate_do_sample=True,
        llama_model_generate_top_p=0.9,
        llama_model_generate_repetition_penalty=1.0,
        llama_model_generate_length_penalty=1.0,
        llama_model_generate_temperature=1.0,
        load_epoch_ckpt=False,
        load_step_ckpt=False,
        load_eval_ckpt=False,
        ckpt_path="",
    ):
        """Build the speech adapter, load the LLM (optionally 8-bit) and tokenizer.

        Args:
            encoder: pre-built speech encoder returning ``(embeds, lens)``;
                embeds are expected to be 1024-dim (see adapter below).
            llm_path: HuggingFace path/name of the causal LLM.
            speech_qformer_token_num, speech_qformer_layer: kept for interface
                compatibility with the disabled Q-Former branch; unused here.
            lora, lora_alpha, lora_rank, lora_dropout: LoRA adaptation settings.
            second_per_frame, second_stride: kept for the disabled
                frame-splitting branch; unused in the current path.
            low_resource: load the LLM in 8-bit with ``device_map="auto"``.
            prompt_pattern: template containing ``<SpeechHere>`` where the
                speech embeddings are spliced in.
            llama_model_generate_*: decoding hyper-parameters for `generate`.
            load_epoch_ckpt, load_step_ckpt, load_eval_ckpt: if any is set,
                ``ckpt_path`` is loaded into this module (non-strict).
            ckpt_path: checkpoint file to load when any load_* flag is set.
        """
        super().__init__()

        # Speech front-end: encoder output (1024-dim) -> 1280-dim bridge space.
        self.speech_encoder = encoder
        self.hubert_dim2whisper_dim = nn.Linear(1024, 1280)
        self.ln_speech = nn.LayerNorm(1280)

        # Small Transformer over the adapted speech features.
        self.speech_transformer = TransformerEncoder(
            input_size=1280,
            output_size=1280,
            attention_heads=4,
            linear_units=2560,
            num_blocks=4,
            dropout_rate=0.1,
            positional_dropout_rate=0.1,
            attention_dropout_rate=0.0,
            input_layer="linear",
            pos_enc_layer_type="abs_pos",
            normalize_before=True,
        )

        # Causal LLM in fp16; 8-bit + auto device map when memory is scarce.
        if not low_resource:
            self.llama_model = AutoModelForCausalLM.from_pretrained(
                llm_path,
                torch_dtype=torch.float16,
            )
        else:
            self.llama_model = AutoModelForCausalLM.from_pretrained(
                llm_path,
                torch_dtype=torch.float16,
                load_in_8bit=True,
                device_map="auto",
            )

        # Decoding hyper-parameters consumed by `generate`.
        self.max_length = llama_model_generate_max_length
        self.min_length = llama_model_generate_min_length
        self.num_beams = llama_model_generate_num_beams
        self.do_sample = llama_model_generate_do_sample
        self.top_p = llama_model_generate_top_p
        self.repetition_penalty = llama_model_generate_repetition_penalty
        self.length_penalty = llama_model_generate_length_penalty
        self.temperature = llama_model_generate_temperature
        self.load_epoch_ckpt = load_epoch_ckpt
        self.load_step_ckpt = load_step_ckpt
        self.load_eval_ckpt = load_eval_ckpt

        # LoRA adaptation of the LLM.  The two original branches differed only
        # in `inference_mode`, so they are folded into a single config.
        self.lora = lora
        if lora:
            self.peft_config = LoraConfig(
                task_type=TaskType.CAUSAL_LM,
                inference_mode=self.load_eval_ckpt,  # frozen adapters at eval time
                r=lora_rank,
                lora_alpha=lora_alpha,
                lora_dropout=lora_dropout,
                target_modules=None,  # let PEFT pick the model's default targets
            )
            self.llama_model = get_peft_model(self.llama_model, self.peft_config)

        # Tokenizer; a [PAD] token is added because LLaMA-style vocabs lack one.
        self.llama_tokenizer = AutoTokenizer.from_pretrained(llm_path, use_fast=False)
        self.llama_tokenizer.add_special_tokens({'pad_token': '[PAD]'})
        self.llama_tokenizer.padding_side = "right"

        # Project speech features into the LLM embedding space.
        self.speech_llama_proj = nn.Linear(
            1280, self.llama_model.config.hidden_size)

        self.prompt_pattern = prompt_pattern
        # NOTE(review): only used by a disabled experimental loss path; 65000
        # presumably matches the target LLM vocab size — confirm before reuse.
        self.LLM_out_proj = nn.Linear(4096, 65000)
        self.ce_loss = torch.nn.CrossEntropyLoss()

        # Optionally resume from a checkpoint (non-strict: extra/missing keys OK).
        self.ckpt_path = ckpt_path
        if self.load_epoch_ckpt or self.load_step_ckpt or self.load_eval_ckpt:
            checkpoint = torch.load(self.ckpt_path, map_location=self.llama_model.device)
            self.load_state_dict(checkpoint, strict=False)
            logging.info(f"Checkpoint {self.ckpt_path} has been loaded.")

    def forward(
       self,
       wavs,
       wavs_len,
       prompt,
       labels,
    ):
        """Compute the teacher-forced LM loss for a batch of (speech, prompt, text).

        The LLM input (as embeddings) is laid out as::

            [bos] [prompt-left] [speech frames] [prompt-right] [sos] [answer] [eos]

        and the label tensor is ``-100`` (the LM loss ignore_index) everywhere
        except the answer tokens and the final eos target.

        Args:
            wavs, wavs_len: batched waveforms and their lengths.
            prompt: text inserted into ``self.prompt_pattern``.
            labels: padded target token ids (padding is ignore_id).

        Returns:
            dict with key ``"loss"``.
        """
        # Speech front-end — must mirror `generate`.
        speech_embeds, speech_lens = self.speech_encoder(wavs, wavs_len)
        speech_embeds = self.hubert_dim2whisper_dim(speech_embeds)
        speech_embeds = self.ln_speech(speech_embeds)  # (B, T, 1280)
        B, T, C = speech_embeds.shape
        speech_embeds, speech_masks = self.speech_transformer(speech_embeds, speech_lens)
        speech_embeds = self.speech_llama_proj(speech_embeds)

        # Resolve the embedding table (PEFT wraps the model one level deeper).
        embed_tokens = (
            self.llama_model.model.model.embed_tokens
            if self.lora
            else self.llama_model.model.embed_tokens
        )

        # Split the prompt template around the speech placeholder, embed both
        # halves, and expand them to the speech batch size.
        prompt_left, prompt_right = self.prompt_pattern.format(prompt).split('<SpeechHere>')
        prompt_left_ids = self.llama_tokenizer(
            prompt_left,
            return_tensors="pt",
            add_special_tokens=False
        ).to(speech_embeds.device).input_ids
        prompt_left_embeds = embed_tokens(prompt_left_ids).repeat_interleave(B, dim=0)
        prompt_left_ids = prompt_left_ids.repeat_interleave(B, dim=0)

        prompt_right_ids = self.llama_tokenizer(
            prompt_right,
            return_tensors="pt",
            add_special_tokens=False
        ).to(speech_embeds.device).input_ids
        prompt_right_embeds = embed_tokens(prompt_right_ids).repeat_interleave(B, dim=0)
        prompt_right_ids = prompt_right_ids.repeat_interleave(B, dim=0)

        # Teacher forcing: labels_in = [sos, y...] (pad embeds are eos),
        # labels_out = [y..., eos] (pad positions are -100).
        labels_in, labels_out = add_sos_eos(
            labels, self.llama_tokenizer.bos_token_id,
            self.llama_tokenizer.eos_token_id, ignore_id=-100)
        labels_in_embeds = embed_tokens(labels_in)

        bos_ids = torch.full(
            [B, 1], self.llama_tokenizer.bos_token_id,
            dtype=torch.long, device=speech_embeds.device)
        bos_embeds = embed_tokens(bos_ids)
        eos_ids = torch.full(
            [B, 1], self.llama_tokenizer.eos_token_id,
            dtype=torch.long, device=speech_embeds.device)
        eos_embeds = embed_tokens(eos_ids)

        # One -100 label per non-answer position
        # (bos + left prompt + speech frames + right prompt).
        speech_ids = torch.ones(
            [speech_embeds.size(0), speech_embeds.size(1)],
            dtype=torch.long, device=speech_embeds.device)
        prefix_ids = torch.cat([bos_ids, prompt_left_ids, speech_ids, prompt_right_ids], dim=1)
        prefix_fill = prefix_ids.fill_(-100)  # ignore_index of the LM loss

        embeds = torch.cat(
            [bos_embeds, prompt_left_embeds, speech_embeds,
             prompt_right_embeds, labels_in_embeds, eos_embeds], dim=1)

        # BUG FIX: labels must be position-aligned with `embeds` and the same
        # length.  HF causal LMs shift internally (logits[i] predict
        # labels[i+1]), so the target at the [sos] position is -100 and
        # `labels_out` lines up with the answer positions; its final eos entry
        # lands on the trailing eos_embeds column.  The original
        # cat([prefix_fill, labels_out]) was one position short and placed the
        # wenet-style pre-shifted targets one step too early (double shift).
        sos_fill = torch.full_like(prefix_fill[:, :1], -100)
        labels = torch.cat([prefix_fill, sos_fill, labels_out], dim=1)

        outputs = self.llama_model(
            inputs_embeds=embeds,
            labels=labels,
        )
        return {"loss": outputs['loss']}

    def generate(
        self,
        wavs,
        wavs_len,
        prompt,
    ):
        """Decode a text response for a batch of waveforms and a text prompt.

        Returns:
            list[str]: one decoded string per batch element.
        """
        # Speech front-end — kept identical to `forward`.
        speech_embeds, speech_lens = self.speech_encoder(wavs, wavs_len)
        speech_embeds = self.hubert_dim2whisper_dim(speech_embeds)
        # BUG FIX: apply the same LayerNorm as in training; it was skipped
        # here, creating a train/inference mismatch.
        speech_embeds = self.ln_speech(speech_embeds)
        B, T, C = speech_embeds.shape
        speech_embeds, speech_masks = self.speech_transformer(speech_embeds, speech_lens)
        speech_embeds = self.speech_llama_proj(speech_embeds)

        # Resolve the embedding table (PEFT wraps the model one level deeper).
        embed_tokens = (
            self.llama_model.model.model.embed_tokens
            if self.lora
            else self.llama_model.model.embed_tokens
        )

        # Split the prompt template around the speech placeholder.
        prompt_left, prompt_right = self.prompt_pattern.format(prompt).split('<SpeechHere>')
        prompt_left_ids = self.llama_tokenizer(
            prompt_left,
            return_tensors="pt",
            add_special_tokens=False
        ).to(speech_embeds.device).input_ids
        # BUG FIX: expand the batch-1 prompt embeddings to the speech batch
        # size (as `forward` does); a no-op when B == 1, required for B > 1.
        prompt_left_embeds = embed_tokens(prompt_left_ids).repeat_interleave(B, dim=0)
        prompt_right_ids = self.llama_tokenizer(
            prompt_right,
            return_tensors="pt",
            add_special_tokens=False
        ).to(speech_embeds.device).input_ids
        prompt_right_embeds = embed_tokens(prompt_right_ids).repeat_interleave(B, dim=0)

        bos_embeds = embed_tokens(
            torch.full(
                [B, 1], self.llama_tokenizer.bos_token_id,
                dtype=torch.long, device=speech_embeds.device)
        )

        embeds = torch.cat(
            [bos_embeds, prompt_left_embeds, speech_embeds, prompt_right_embeds], dim=1)
        atts = torch.ones(embeds.size()[:-1], dtype=torch.long, device=embeds.device)

        outputs = self.llama_model.generate(
            inputs_embeds=embeds,
            max_length=self.max_length,
            num_beams=self.num_beams,
            do_sample=self.do_sample,
            min_length=self.min_length,
            top_p=self.top_p,
            repetition_penalty=self.repetition_penalty,
            length_penalty=self.length_penalty,
            temperature=self.temperature,
            attention_mask=atts,
            bos_token_id=self.llama_tokenizer.bos_token_id,
            eos_token_id=self.llama_tokenizer.eos_token_id,
            pad_token_id=self.llama_tokenizer.pad_token_id,
        )

        return self.llama_tokenizer.batch_decode(
            outputs, add_special_tokens=False, skip_special_tokens=True)

    def init_speech_Qformer(self, num_query_token, speech_width, num_hidden_layers=2):
        """Build a BERT-based Q-Former with `num_query_token` learned query tokens.

        Kept for the currently-disabled Q-Former speech-token path; not called
        in the active forward/generate code.

        Returns:
            (BertLMHeadModel, nn.Parameter): the Q-Former and its query tokens.
        """
        encoder_config = BertConfig()
        encoder_config.num_hidden_layers = num_hidden_layers
        encoder_config.encoder_width = speech_width
        encoder_config.add_cross_attention = True
        encoder_config.cross_attention_freq = 1
        encoder_config.query_length = num_query_token
        Qformer = BertLMHeadModel(config=encoder_config)
        query_tokens = nn.Parameter(
            torch.zeros(1, num_query_token, encoder_config.hidden_size)
        )
        query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range)
        return Qformer, query_tokens
    
def _main():
    """Smoke test: build the model and decode one AISHELL test utterance."""
    device = torch.device("cuda")
    model = SALMONN(
        encoder=OpenAIWhisperEncoder(),
        llm_path='/home/local_data/vicuna-7b-v1.5',
    )
    model.to(device)
    model.eval()
    # soundfile returns (samples, samplerate); keep only the samples.
    wav = sf.read("/home/work_nfs/common/data/data_aishell/wav/test/S0764/BAC009S0764W0121.wav")[0]
    wav = torch.tensor(wav)
    wav_len = torch.tensor([wav.size(0)])
    prompt = 'Describe the speech.'
    with torch.no_grad():
        # BUG FIX: removed the leftover `import pdb; pdb.set_trace()` debug
        # breakpoint, and print the result instead of discarding it.
        output = model.generate(wav, wav_len, prompt)
    print(output)


if __name__ == '__main__':
    _main()