from module.models import SynthesizerTrn
import torch
from .DataStruct import DeviceType
from tools.my_utils import load_audio
from module.mel_processing import spectrogram_torch


class DictToAttrRecursive(dict):
    """A dict whose entries are also reachable as attributes, recursively.

    Nested plain dicts are wrapped on the fly, so ``cfg.model.hidden`` and
    ``cfg["model"]["hidden"]`` address the same value.
    """

    def __init__(self, source):
        super().__init__(source)
        for name in source:
            entry = source[name]
            if isinstance(entry, dict):
                entry = DictToAttrRecursive(entry)
            self[name] = entry
            setattr(self, name, entry)

    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails; fall back to the
        # dict entry, translating a miss into AttributeError.
        if name in self:
            return self[name]
        raise AttributeError(f"Attribute {name} not found")

    def __setattr__(self, name, value):
        # Keep the mapping and the attribute view in lockstep.
        entry = DictToAttrRecursive(value) if isinstance(value, dict) else value
        dict.__setitem__(self, name, entry)
        object.__setattr__(self, name, entry)

    def __delattr__(self, name):
        if name not in self:
            raise AttributeError(f"Attribute {name} not found")
        del self[name]


class SoVITS:
    """Thin wrapper around the SoVITS ``SynthesizerTrn`` synthesizer.

    Handles (re)loading checkpoint weights and extracting the linear
    spectrogram of a reference audio file for voice-prompt conditioning.
    """

    def __init__(self, language_dict, is_half=False, device=DeviceType.CPU):
        """
        Args:
            language_dict: mapping of display language name -> internal code;
                its keys populate the UI dropdown choices.
            is_half: run the model in float16 when True.
            device: ``DeviceType`` enum member; ``device.value`` is used as the
                torch device identifier.
        """
        self.language_dict = language_dict
        self.isHalf = is_half
        self.device = device.value

    def change_sovits_weights(self, sovits_path, prompt_language=None, text_language=None):
        """Load a SoVITS checkpoint and rebuild ``self.vq_model``.

        Args:
            sovits_path: path to a checkpoint containing "config" and "weight".
            prompt_language: current UI prompt-language selection (optional).
            text_language: current UI text-language selection (optional).

        Returns:
            None, or — when both languages are given — a 6-tuple of
            Gradio-style update dicts: (prompt-language choices, text-language
            choices, prompt text, prompt language, text, text language).
        """
        # NOTE(review): torch.load on an untrusted checkpoint unpickles
        # arbitrary objects; the embedded config dict needs full pickle, so
        # weights_only cannot be enabled without changing the checkpoint format.
        dict_s2 = torch.load(sovits_path, map_location="cpu")
        self.hps = DictToAttrRecursive(dict_s2["config"])
        self.hps.model.semantic_frame_rate = "25hz"
        # The v1 text-embedding table has exactly 322 rows; anything else is v2.
        if dict_s2["weight"]["enc_p.text_embedding.weight"].shape[0] == 322:
            self.hps.model.version = "v1"
        else:
            self.hps.model.version = "v2"
        self.vq_model = SynthesizerTrn(
            self.hps.data.filter_length // 2 + 1,
            self.hps.train.segment_size // self.hps.data.hop_length,
            n_speakers=self.hps.data.n_speakers,
            **self.hps.model
        )
        # The posterior encoder is only used during training; drop it for
        # fine-tuned (non-pretrained) inference checkpoints to save memory.
        if "pretrained" not in sovits_path:
            del self.vq_model.enc_q
        # .half()/.to() return the module itself, but re-assigning keeps
        # self.vq_model authoritative instead of a dangling local alias.
        if self.isHalf:
            self.vq_model = self.vq_model.half()
        self.vq_model = self.vq_model.to(self.device)
        self.vq_model.eval()
        # strict=False because enc_q may have been deleted above.
        print(self.vq_model.load_state_dict(dict_s2["weight"], strict=False))

        if prompt_language is None or text_language is None:
            return None

        known = list(self.language_dict.keys())

        def _language_updates(language):
            # Keep the selection when it is recognised; otherwise clear the
            # text box and fall back to the default language.
            if language in known:
                return {'__type__': 'update'}, {'__type__': 'update', 'value': language}
            return ({'__type__': 'update', 'value': ''},
                    {'__type__': 'update', 'value': "中文"})  # i18n("中文")

        prompt_text_update, prompt_language_update = _language_updates(prompt_language)
        text_update, text_language_update = _language_updates(text_language)
        return ({'__type__': 'update', 'choices': known},
                {'__type__': 'update', 'choices': known},
                prompt_text_update, prompt_language_update,
                text_update, text_language_update)

    def get_spepc(self, filename):
        """Compute the linear spectrogram of an audio file.

        Args:
            filename: path to the reference audio file.

        Returns:
            Spectrogram tensor from ``spectrogram_torch`` for the (batched)
            normalised audio.
        """
        audio = torch.FloatTensor(load_audio(filename, int(self.hps.data.sampling_rate)))
        peak = audio.abs().max()
        # NOTE(review): the divisor is capped at 2, so audio peaking above 2 is
        # only partially normalised — kept as-is to match existing behaviour.
        if peak > 1:
            audio /= min(2, peak)
        audio_norm = audio.unsqueeze(0)
        return spectrogram_torch(
            audio_norm,
            self.hps.data.filter_length,
            self.hps.data.sampling_rate,
            self.hps.data.hop_length,
            self.hps.data.win_length,
            center=False,
        )