import argparse
import functools
import os
import shutil

import numpy as np
import torch

from .modules.ecapa_tdnn import EcapaTdnn, SpeakerIdetification
from .data_utils.reader import load_audio
from audioapp.models import voiceprint
from audioapp.serializers import VoiceprintSerializer
import base64


class VoiceprintRecognition():
    """ECAPA-TDNN based speaker (voiceprint) recognition.

    Extracts a fixed-length speaker embedding from an audio file and
    supports 1-vs-1 comparison as well as 1-vs-N search against an
    in-memory database loaded from the Django ``voiceprint`` model.
    """

    def __init__(self, threshold=0.5, feature_method="melspectrogram"):
        """
        Args:
            threshold: similarity threshold for accepting a match
                (stored for callers; not applied inside this class).
            feature_method: audio feature extraction method,
                one of 'melspectrogram' or 'spectrogram'.
        """
        self.threshold = threshold
        self.feature_method = feature_method
        self.use_model = 'ecapa_tdnn'
        # Length (seconds) of audio used for prediction.
        self.audio_duration = 3
        # Fall back to CPU when CUDA is unavailable instead of crashing
        # at the first .to(device) call on CPU-only hosts.
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.voiceprint_feature = []
        self.voiceprint_name = []
        # Dimensionality of the embedding produced by the backbone.
        self._feature_len = 192
        self.__loadModel__()

    def __loadModel__(self, model_name="ecapa_tdnn"):
        """Build the ECAPA-TDNN model and load pretrained weights.

        Checkpoint tensors whose shape does not match the current
        architecture are dropped; loading uses ``strict=False``.

        Raises:
            ValueError: if ``self.feature_method`` is not a supported
                feature extraction method.
        """
        if self.feature_method == 'melspectrogram':
            input_size = 80
        elif self.feature_method == 'spectrogram':
            input_size = 201
        else:
            # Previously this fell through both branches and raised a
            # confusing NameError on ``input_size``; fail clearly instead.
            raise ValueError(
                f"Unsupported feature_method: {self.feature_method!r}; "
                "expected 'melspectrogram' or 'spectrogram'")
        ecapa_tdnn = EcapaTdnn(input_size=input_size)
        self.model = SpeakerIdetification(backbone=ecapa_tdnn).to(self.device)
        model_path = os.path.join(
            "voiceprint_recognition", "models", model_name, 'model.pth')
        print("model_path:", model_path)
        model_dict = self.model.state_dict()
        # map_location keeps a CUDA-saved checkpoint loadable on CPU.
        param_state_dict = torch.load(model_path, map_location=self.device)
        # Drop checkpoint tensors whose shape conflicts with the model so
        # load_state_dict(strict=False) skips them instead of erroring.
        for name, weight in model_dict.items():
            if name in param_state_dict:
                if list(weight.shape) != list(param_state_dict[name].shape):
                    param_state_dict.pop(name, None)
        self.model.load_state_dict(param_state_dict, strict=False)
        print(f"成功加载模型参数和优化方法参数：{model_path}")
        self.model.eval()

    def get_feature(self, audio_path):
        """Return the speaker embedding for the audio file at ``audio_path``.

        Returns:
            1-D ``np.float32`` array of length ``self._feature_len``.
        """
        data = load_audio(audio_path, mode='infer',
                          feature_method=self.feature_method)
        data = data[np.newaxis, :]
        data = torch.tensor(data, dtype=torch.float32, device=self.device)
        # Inference only: skip autograd graph construction.
        with torch.no_grad():
            feature = self.model.backbone(data)
        return feature.cpu().numpy()[0]

    def compare_1v1(self, audio1_path, audio2_path):
        """Cosine similarity between the embeddings of two audio files."""
        fea1 = self.get_feature(audio1_path)
        fea2 = self.get_feature(audio2_path)
        sim = np.dot(fea1, fea2) / \
            (np.linalg.norm(fea1) * np.linalg.norm(fea2))
        return sim

    def compare_1vn(self, fea):
        """Match embedding ``fea`` against the loaded voiceprint database.

        Args:
            fea: embedding of length ``self._feature_len``.

        Returns:
            ``(best_index, best_score)`` over the database rows.

        NOTE(review): this is a raw dot product, unlike the cosine
        similarity used in ``compare_1v1``; it is equivalent only if the
        stored features and ``fea`` are L2-normalized — confirm against
        the enrollment path.
        """
        fea = fea.reshape(self._feature_len, 1)
        res = np.matmul(self._voiceprint_db_fea, fea)[:, 0]
        max_value = np.max(res)
        max_ind = np.argmax(res)
        return max_ind, max_value

    @property
    def voiceprint_db_fea(self):
        # (num_enrolled, feature_len) matrix of database embeddings.
        return self._voiceprint_db_fea

    @property
    def vocieprint_db_name(self):
        # Public name kept (with its historical typo) so existing
        # callers keep working.
        return self._voiceprint_db_name

    @property
    def feature_len(self):
        return self._feature_len

    def update_db(self):
        """Reload all enrolled voiceprints from the database.

        Decodes each base64-encoded float32 embedding and rebuilds the
        in-memory feature matrix / name list used by ``compare_1vn``.
        """
        queryset = voiceprint.objects.all()
        serializer = VoiceprintSerializer(queryset, many=True)
        instances = serializer.instance
        db_fea = []
        db_name = []
        for instance in instances:
            db_name.append(instance.user)
            fea = np.frombuffer(
                base64.b64decode(instance.voiceprint),
                np.float32).reshape(1, self._feature_len)
            db_fea.append(fea)
        if db_fea:
            self._voiceprint_db_fea = np.concatenate(db_fea, 0)
        else:
            # np.concatenate([]) raises; keep a well-formed empty matrix.
            self._voiceprint_db_fea = np.empty(
                (0, self._feature_len), dtype=np.float32)
        # Bug fix: this was written to ``self._vocieprint_db_name``
        # (transposed letters), so the ``vocieprint_db_name`` property
        # always raised AttributeError.
        self._voiceprint_db_name = db_name
