import numpy as np
import librosa
import os, pickle
from SelfTalk import SelfTalk
import torch
import time
from transformers import Wav2Vec2Processor


class SATalkerHandler:
    """Runs SelfTalk inference: converts a wav file into a mesh-animation array.

    Usage: construct, call :meth:`load_model` once, then :meth:`wav2mesh`
    per audio file. Results are written as ``.npy`` under ``result_path``.
    """

    def __init__(self):
        # Model and feature extractor are loaded lazily (see load_model).
        self.model = None
        self.processor = None
        self.model_path = ''        # path to the SelfTalk checkpoint; caller must set this
        self.device = 'cpu'
        self.dataset = 'vocaset'    # directory expected to contain templates.pkl
        # Key into the templates mapping.
        # NOTE(review): vocaset templates are usually keyed by subject-name
        # strings; confirm an integer key is valid for this pickle.
        self.subject_index = 0
        self.result_path = 'datasample'

        self.model_config = {
            "device": self.device,
            "processor_path": "jonatasgrosman/wav2vec2-large-xlsr-53-english",
        }

    def load_model(self):
        """Load the SelfTalk model and the wav2vec2 processor (idempotent).

        Each object is created only on the first call; subsequent calls
        are no-ops for whichever objects already exist.
        """
        if self.model is None:
            self.model = SelfTalk(self.model_config)
            # map_location lets a GPU-trained checkpoint load on a CPU-only box.
            state = torch.load(self.model_path, map_location=torch.device(self.device))
            self.model.load_state_dict(state)
            self.model = self.model.to(torch.device(self.device))
            self.model.eval()
            print("Model loaded successfully in {}.".format(self.device))

        if self.processor is None:
            # Speech feature extractor: raw waveform -> model input values.
            self.processor = Wav2Vec2Processor.from_pretrained(self.model_config['processor_path'])
            print("Processor loaded successfully in {}.".format(self.model_config['processor_path']))

    def wav2mesh(self, wav_path=None):
        """Predict a mesh sequence from a wav file and save it as ``.npy``.

        Args:
            wav_path: path to the input audio file. Required. (The original
                code read an undefined global ``args.wav_path``, which always
                raised NameError; the parameter replaces it.)

        Raises:
            ValueError: if ``wav_path`` is not provided.
        """
        if wav_path is None:
            raise ValueError("wav_path must be provided")

        # exist_ok makes a separate existence check redundant.
        os.makedirs(self.result_path, exist_ok=True)

        # Load the neutral-face template for the chosen subject.
        template_file = os.path.join(self.dataset, 'templates.pkl')
        with open(template_file, 'rb') as fin:
            templates = pickle.load(fin, encoding='latin1')

        temp = templates[self.subject_index]
        # Flatten to a single row (1, num_values) and move to the target device.
        template = np.reshape(temp.reshape((-1)), (1, -1))
        template = torch.FloatTensor(template).to(device=self.device)

        # splitext keeps the full stem for filenames containing dots
        # (split(".")[0] would truncate "a.b.wav" to "a").
        test_name = os.path.splitext(os.path.basename(wav_path))[0]

        # Load audio at 16 kHz (the rate wav2vec2 expects) and extract features.
        speech_array, sampling_rate = librosa.load(wav_path, sr=16000)
        audio_feature = np.squeeze(self.processor(speech_array, sampling_rate=16000).input_values)
        audio_feature = np.reshape(audio_feature, (1, -1))
        audio_feature = torch.FloatTensor(audio_feature).to(device=self.device)

        # Inference; no_grad avoids building an autograd graph we never use.
        start = time.time()
        with torch.no_grad():
            prediction, lip_features, logits = self.model.predict(audio_feature, template)
        end = time.time()
        print("Model predict time: ", end - start)

        prediction = prediction.squeeze()
        np.save(os.path.join(self.result_path, test_name), prediction.detach().cpu().numpy())
