import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from wav2vec import Wav2Vec2Model, Wav2Vec2ForCTC, linear_interpolation
import os
import numpy as np
from pathlib import Path
from typing import Dict, Union, Optional
from utils.logger import Logger
import subprocess
import librosa
from transformers import Wav2Vec2Processor
import time
from types import SimpleNamespace


# Temporal Bias, borrowed from https://github.com/EvelynFan/FaceFormer/blob/main/faceformer.py
def init_biased_mask(n_head, max_seq_len, period):
    def get_slopes(n):
        def get_slopes_power_of_2(n):
            start = (2 ** (-2 ** -(math.log2(n) - 3)))
            ratio = start
            return [start * ratio ** i for i in range(n)]

        if math.log2(n).is_integer():
            return get_slopes_power_of_2(n)
        else:
            closest_power_of_2 = 2 ** math.floor(math.log2(n))
            return get_slopes_power_of_2(closest_power_of_2) + get_slopes(2 * closest_power_of_2)[0::2][
                                                               :n - closest_power_of_2]

    slopes = torch.Tensor(get_slopes(n_head))
    bias = torch.arange(start=0, end=max_seq_len, step=period).unsqueeze(1).repeat(1, period).view(-1) // (period)
    bias = - torch.flip(bias, dims=[0])
    alibi = torch.zeros(max_seq_len, max_seq_len)
    for i in range(max_seq_len):
        alibi[i, :i + 1] = bias[-(i + 1):]
    alibi = slopes.unsqueeze(1).unsqueeze(1) * alibi.unsqueeze(0)
    mask = (torch.triu(torch.ones(max_seq_len, max_seq_len)) == 1).transpose(0, 1)
    mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
    mask = mask.unsqueeze(0) + alibi
    return mask


# Input Representation Adjustment, borrowed from https://github.com/galib360/FaceXHuBERT
def inputRepresentationAdjustment(audio_embedding_matrix, vertex_matrix, ifps, ofps):
    """Align audio embeddings (ifps frames/s) with vertex animation (ofps frames/s).

    `factor = ceil(ifps / ofps)` consecutive audio frames are stacked into one
    animation frame.  When ifps divides evenly by ofps the sequences are
    trimmed to matching lengths; otherwise the audio track is linearly
    resampled to exactly `factor` frames per animation frame.

    Bug fix / generalization: the divisible branch previously hardcoded the
    factor as 2 (`% 2`, `* 2`, `// 2`) even though `factor` was computed, and
    the final reshape hardcoded batch size 1.  Both now use the computed
    values; behavior is unchanged for the (ifps=50, ofps=25, batch=1) case
    the model actually uses.

    Args:
        audio_embedding_matrix: (batch, audio_frames, feat) tensor.
        vertex_matrix: (batch, anim_frames, vertice_dim) tensor.
        ifps: input (audio feature) frame rate.
        ofps: output (animation) frame rate.

    Returns:
        (stacked_audio, vertex_matrix, frame_num) where stacked_audio has
        shape (batch, audio_frames // factor, feat * factor) and frame_num
        is the possibly-trimmed animation length.
    """
    factor = -1 * (-ifps // ofps)  # ceil division
    if ifps % ofps == 0:
        # Trim the audio track to a multiple of `factor` frames.
        remainder = audio_embedding_matrix.shape[1] % factor
        if remainder != 0:
            audio_embedding_matrix = audio_embedding_matrix[:, :audio_embedding_matrix.shape[1] - remainder]

        # Trim whichever sequence is longer so both cover the same time span.
        if audio_embedding_matrix.shape[1] > vertex_matrix.shape[1] * factor:
            audio_embedding_matrix = audio_embedding_matrix[:, :vertex_matrix.shape[1] * factor]
        elif audio_embedding_matrix.shape[1] < vertex_matrix.shape[1] * factor:
            vertex_matrix = vertex_matrix[:, :audio_embedding_matrix.shape[1] // factor]
    else:
        # Frame rates don't divide evenly: resample the audio features.
        audio_embedding_seq_len = vertex_matrix.shape[1] * factor
        audio_embedding_matrix = audio_embedding_matrix.transpose(1, 2)
        audio_embedding_matrix = F.interpolate(audio_embedding_matrix, size=audio_embedding_seq_len, align_corners=True,
                                               mode='linear')
        audio_embedding_matrix = audio_embedding_matrix.transpose(1, 2)

    frame_num = vertex_matrix.shape[1]
    # Stack `factor` consecutive audio frames into one wide animation frame.
    audio_embedding_matrix = torch.reshape(audio_embedding_matrix, (
        audio_embedding_matrix.shape[0],
        audio_embedding_matrix.shape[1] // factor,
        audio_embedding_matrix.shape[2] * factor))
    return audio_embedding_matrix, vertex_matrix, frame_num


class SelfTalk(nn.Module):
    """Speech-driven 3D facial animation network.

    Pipeline: raw waveform -> wav2vec2 audio features -> transformer
    decoder -> per-frame vertex offsets added onto a neutral-face template.
    A second wav2vec2-CTC branch produces text hidden states and logits;
    the lip region of the predicted mesh is mapped back into that feature
    space so lip motion can be supervised for consistency with the spoken
    content.
    """

    def __init__(self):
        super(SelfTalk, self).__init__()
        # Fixed configuration; originally these came from an args object.
        self.dataset = 'vocaset'
        self.period = 25           # step of the temporal (ALiBi-style) bias
        self.feature_dim = 512     # decoder model width
        self.vertice_dim = 15069   # 5023 vertices * 3 coords (FLAME topology)
        self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
        # NOTE(review): raw strings with doubled backslashes -- the literal
        # path contains "\\" separators; Windows tolerates this, but the
        # mixed r""/escaped style is fragile.  Left byte-identical on purpose.
        self.audio_encoder = Wav2Vec2Model.from_pretrained(r"J:\\coding\\audio\\pretrain\wav2vec2-large-xlsr-53-english")
        self.text_encoder = Wav2Vec2ForCTC.from_pretrained(r"J:\\coding\\audio\\pretrain\wav2vec2-large-xlsr-53-english")
        # The text encoder stays trainable; only the convolutional feature
        # extractor of the audio encoder is frozen.
        self.audio_encoder.feature_extractor._freeze_parameters()

        if self.dataset == "vocaset":
            pkl_path = "./vocaset/FLAME_masks_.pkl"
            with open(pkl_path, 'rb') as f:
                # Vertex indices of the lip region of the FLAME mesh.
                self.lip_mask = pickle.load(f, encoding="latin1")["lips"]
                self.lip_map = nn.Linear(254 * 3, 1024)
        elif self.dataset == "BIWI":
            with open('./BIWI/BIWI_lip.pkl', 'rb') as f:
                self.lip_mask = pickle.load(f, encoding="latin1")
                self.lip_map = nn.Linear(4996 * 3, 1024)

        self.biased_mask = init_biased_mask(n_head=4, max_seq_len=600, period=self.period)
        decoder_layer = nn.TransformerDecoderLayer(d_model=self.feature_dim, nhead=4,
                                                   dim_feedforward=2 * self.feature_dim, batch_first=True)
        self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=1)
        if self.dataset == "vocaset":
            self.audio_feature_map = nn.Linear(1024, self.feature_dim)
            self.transformer = nn.Transformer(d_model=1024, batch_first=True)
        elif self.dataset == "BIWI":
            # BIWI stacks two 50 fps audio frames per 25 fps video frame.
            self.audio_feature_map = nn.Linear(2048, self.feature_dim)
            self.transformer = nn.Transformer(d_model=1024, batch_first=True)
        self.vertice_map_r = nn.Linear(self.feature_dim, self.vertice_dim)

        self.dropout = nn.Dropout(p=0.0, inplace=False)
        self.lm_head = nn.Linear(1024, 33)  # 33-way CTC vocabulary head

        # Zero init so the model initially predicts the neutral template.
        nn.init.constant_(self.vertice_map_r.weight, 0)
        nn.init.constant_(self.vertice_map_r.bias, 0)

    def forward(self, audio, template, vertice):
        """Training forward pass.

        Args:
            audio: raw waveform tensor, shape (1, num_samples) -- assumed
                16 kHz mono (TODO confirm against the data pipeline).
            template: neutral face, shape (1, vertice_dim).
            vertice: ground-truth animation, shape (1, frames, vertice_dim).

        Returns:
            Tuple (vertice_out, vertice, lip_features, text_hidden_states,
            logits, text_logits): predicted animation (template-relative
            offsets added back onto the template), the possibly-trimmed
            ground truth, and the lip/text branch outputs used by the
            consistency losses.
        """
        template = template.unsqueeze(1)
        frame_num = vertice.shape[1]
        hidden_states = self.audio_encoder(audio, self.dataset, frame_num=frame_num).last_hidden_state

        if self.dataset == "BIWI":
            # Audio features run at 50 fps vs 25 fps video: stack frame pairs.
            hidden_states, vertice, frame_num = inputRepresentationAdjustment(hidden_states, vertice, 50, 25)
            hidden_states = hidden_states[:, :frame_num]
        vertice_input = self.audio_feature_map(hidden_states)
        vertice_out = self.transformer_decoder(vertice_input, vertice_input)
        vertice_out = self.vertice_map_r(vertice_out)

        # Text branch: hidden states and CTC logits from the same waveform.
        # NOTE(review): this relies on the project's customized
        # Wav2Vec2ForCTC exposing .hidden_states/.logits directly.
        audio_model = self.text_encoder(audio)
        text_hidden_states = audio_model.hidden_states
        text_logits = audio_model.logits
        frame_num = text_hidden_states.shape[1]

        # Lip region of the predicted mesh, flattened back to (B, T, 254*3).
        # (The analogous ground-truth lip slice was computed here before but
        # never used or returned, so it has been removed.)
        lip_out = vertice_out.reshape(vertice_out.shape[0], vertice_out.shape[1], -1, 3)[:, :, self.lip_mask,
                  :].reshape(vertice_out.shape[0], vertice_out.shape[1], -1)
        lip_offset = self.lip_map(lip_out)

        # Resample lip features to the text branch's frame rate (50 fps).
        if self.dataset == "vocaset":
            lip_offset = linear_interpolation(lip_offset, 30, 50, output_len=frame_num)
        elif self.dataset == "BIWI":
            text_hidden_states = text_hidden_states[:, :vertice_out.shape[1] * 2]
            text_logits = text_logits[:, :vertice_out.shape[1] * 2]
            frame_num = text_hidden_states.shape[1]
            lip_offset = linear_interpolation(lip_offset, 25, 50, output_len=frame_num)
        lip_features = self.transformer(lip_offset, lip_offset)
        logits = self.lm_head(self.dropout(lip_features))
        vertice_out = vertice_out + template

        return vertice_out, vertice, lip_features, text_hidden_states, logits, text_logits

    def predict(self, audio, template):
        """Inference pass: waveform + neutral template -> animation.

        Args:
            audio: raw waveform tensor, shape (1, num_samples).
            template: neutral face, shape (1, vertice_dim).

        Returns:
            Tuple (vertice_out, lip_features, logits): predicted per-frame
            vertices, lip-derived features, and CTC-style logits.
        """
        template = template.unsqueeze(1)
        hidden_states = self.audio_encoder(audio, self.dataset).last_hidden_state
        if self.dataset == "BIWI":
            # Stack audio frame pairs (50 fps -> 25 fps), dropping an odd tail.
            if hidden_states.shape[1] % 2 != 0:
                hidden_states = hidden_states[:, :hidden_states.shape[1] - 1]
            hidden_states = torch.reshape(hidden_states, (1, hidden_states.shape[1] // 2, hidden_states.shape[2] * 2))
        vertice_input = self.audio_feature_map(hidden_states)
        vertice_out = self.transformer_decoder(vertice_input, vertice_input)
        vertice_out = self.vertice_map_r(vertice_out)
        lip_offset = vertice_out.reshape(vertice_out.shape[0], vertice_out.shape[1], -1, 3)[:, :, self.lip_mask,
                     :].reshape(vertice_out.shape[0], vertice_out.shape[1], -1)
        lip_offset = self.lip_map(lip_offset)

        if self.dataset == "vocaset":
            lip_offset = linear_interpolation(lip_offset, 30, 50, output_len=None)
        elif self.dataset == "BIWI":
            lip_offset = linear_interpolation(lip_offset, 25, 50, output_len=None)
        lip_features = self.transformer(lip_offset, lip_offset)
        # Both datasets add the template identically (was a duplicated branch).
        vertice_out = vertice_out + template
        logits = self.lm_head(self.dropout(lip_features))

        return vertice_out, lip_features, logits


class SelfTalkHandler:
    """Convenience wrapper around a trained SelfTalk model.

    Loads the checkpoint and the wav2vec2 feature processor once, then turns
    WAV files into predicted vertex-animation ``.npy`` files.
    """

    def __init__(self,
                 checkpoint_path: str = "J:\\coding\\audio\\selftalk\\vocaset\\vocaset.pth",
                 device: str = "cuda:0" if torch.cuda.is_available() else "cpu"):
        """Initialize the handler.

        Args:
            checkpoint_path: path to the trained model checkpoint.
            device: torch device string; defaults to GPU when available.
                (NOTE: the default is evaluated once at class-definition time.)
        """
        self.device = device
        self.model = self._load_model(checkpoint_path)
        self.processor = Wav2Vec2Processor.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
        self.templates: Dict[str, torch.Tensor] = {}  # cache of loaded templates, keyed by dataset name

    def _load_model(self, checkpoint_path: str):
        """Build a SelfTalk model, load its weights, and move it to the device.

        Args:
            checkpoint_path: path to the checkpoint file.

        Returns:
            The model in eval mode on ``self.device``.

        Raises:
            Whatever ``torch.load``/``load_state_dict`` raise on failure
            (the previous try/except only re-raised and has been removed).
        """
        model = SelfTalk()
        model.load_state_dict(torch.load(checkpoint_path, map_location=self.device))
        print('模型加载完成')
        model.to(self.device)
        model.eval()
        return model

    def _load_first_template(self, dataset: str):
        """Load the neutral-face template of a fixed vocaset subject.

        Args:
            dataset: dataset directory name (the templates pickle lives there).

        Returns:
            numpy array of shape (1, vertice_dim) for subject
            'FaceTalk_170904_00128_TA'.
        """
        template_file = os.path.join(dataset, "templates_.pkl")
        with open(template_file, 'rb') as fin:
            templates = pickle.load(fin, encoding='latin1')
        temp = templates['FaceTalk_170904_00128_TA']
        template = temp.reshape((-1))
        template = np.reshape(template, (-1, template.shape[0]))
        return template

    def _load_template(self, dataset: str):
        """Load and cache a template tensor for a dataset.

        Args:
            dataset: dataset name ('vocaset' or 'BIWI').

        Returns:
            Template tensor of shape (1, vertice_dim) on ``self.device``.

        Raises:
            ValueError: for an unsupported dataset name.
        """
        cache_key = dataset
        if cache_key in self.templates:
            return self.templates[cache_key]

        if dataset == "vocaset":
            template_file = os.path.join(dataset, "templates_.pkl")
        elif dataset == "BIWI":
            template_file = os.path.join(dataset, "templates.pkl")
        else:
            raise ValueError(f"不支持的数据集: {dataset}")

        with open(template_file, 'rb') as fin:
            templates = pickle.load(fin, encoding='latin1')

        # Use the first template (index 0).
        # NOTE(review): assumes the pickle is indexable by 0 here, while
        # _load_first_template indexes by subject name — verify the format.
        temp = templates[0]
        template = temp.reshape((-1))
        template = np.reshape(template, (-1, template.shape[0]))
        template = torch.FloatTensor(template).to(device=self.device)

        self.templates[cache_key] = template
        return template

    def wav2npy(self, wav_path: Union[str, Path], dataset: str = "vocaset") -> Dict[str, np.ndarray]:
        """Run the model on a WAV file and save the predicted animation.

        Args:
            wav_path: path to the WAV file.
            dataset: dataset name ('vocaset' or 'BIWI').

        Returns:
            Dict with status/message, the mesh data, the raw audio feature,
            the prediction array, and the output ``.npy`` path.
        """
        wav_path = str(wav_path)
        print(wav_path)

        # 1. Load the neutral-face template.
        template = self._load_first_template(dataset)
        print('加载音频并提取特征')

        # 2. Load audio at 16 kHz and extract wav2vec2 input values.
        speech_array, sampling_rate = librosa.load(wav_path, sr=16000)
        audio_feature = np.squeeze(self.processor(speech_array, sampling_rate=16000).input_values)
        audio_feature = np.reshape(audio_feature, (-1, audio_feature.shape[0]))
        audio_feature = torch.FloatTensor(audio_feature).to(device=self.device)

        # 3. Predict.  no_grad avoids building autograd state at inference.
        start_time = time.time()
        print("开始模型预测...")
        template_tensor = torch.FloatTensor(template).to(device=self.device)
        with torch.no_grad():
            prediction, lip_features, logits = self.model.predict(audio_feature, template_tensor)
        end_time = time.time()
        print(f"预测完成，耗时: {end_time - start_time:.2f}秒")

        prediction_np = prediction.squeeze().detach().cpu().numpy()
        print(prediction_np.shape)

        # 4. Save next to the input.  splitext is safer than
        #    str.replace('.wav', ...), which would also hit a '.wav'
        #    occurring earlier in the path.
        output_npy = os.path.splitext(wav_path)[0] + '.npy'
        np.save(output_npy, prediction_np)
        print(f"预测结果已保存至: {output_npy}")

        mesh_data = prediction_np

        return {
            "status": "success",
            "message": "处理完成",
            "mesh_data": mesh_data,
            "audio_feature": audio_feature.cpu().numpy(),
            "prediction": prediction_np,
            "output_path": output_npy
        }

    def process_file(self, file_path: Union[str, Path], dataset: str = "vocaset") -> Dict:
        """Validate an input file and dispatch it to :meth:`wav2npy`.

        Args:
            file_path: input file path; only WAV is supported.
            dataset: dataset name ('vocaset' or 'BIWI').

        Returns:
            The result dict from :meth:`wav2npy`.

        Raises:
            FileNotFoundError: if the file does not exist.
            ValueError: if the file extension is not ``.wav``.
        """
        file_path = Path(file_path)
        if not file_path.exists():
            raise FileNotFoundError(f"文件不存在: {file_path}")

        if file_path.suffix.lower() == '.wav':
            return self.wav2npy(file_path, dataset=dataset)
        raise ValueError(f"不支持的文件类型: {file_path.suffix}")

