from encoder import inference as encoder
from encoder.params_model import model_embedding_size as speaker_embedding_size
from synthesizer.inference import Synthesizer
from vocoder import inference as vocoder
import librosa
import numpy as np
import torch
import soundfile as sf
from pathlib import Path
if __name__ == "__main__":
    # End-to-end voice-cloning demo:
    #   1. embed a reference speaker's voice with the speaker encoder,
    #   2. synthesize a mel spectrogram for the target text,
    #   3. render a waveform with the vocoder and save it to disk.
    seed = 10
    path_encoder = Path("models/encoder.pt")
    path_synthesizer = Path("models/synthesizer.pt")
    path_vocoder = Path("models/vocoder.pt")

    # Seed torch BEFORE any model is built or used so every stochastic step
    # (model construction, the vocoder's sampling) draws from the same RNG
    # stream on every run. The original flow loaded the vocoder twice —
    # once unseeded and once after seeding — solely to get a seeded vocoder;
    # seeding first lets each model be loaded exactly once.
    if seed is not None:
        torch.manual_seed(seed)

    # Model loading
    print("模型加载")
    encoder.load_model(path_encoder)
    synthesizer = Synthesizer(path_synthesizer)
    vocoder.load_model(path_vocoder)
    print("------------模型加载结束！-----------")

    # Read the reference speaker's audio file
    print("读取说话人音频")
    file_wav_speaker = "male.wav"
    # NOTE(review): librosa.load resamples to 22050 Hz by default;
    # preprocess_wav receives the actual rate, so this is harmless here.
    original_wav, sampling_rate = librosa.load(file_wav_speaker)
    # Resample/normalize/trim the clip to the encoder's expected input format.
    preprocessed_wav = encoder.preprocess_wav(original_wav, sampling_rate)

    # Extract the speaker embedding
    print("提取说话人特征")
    embed = encoder.embed_utterance(preprocessed_wav)

    # Synthesize the mel spectrogram for the target text
    print("音频特征合成")
    text = 'welcome to Ludong university'
    texts = [text]
    embeds = [embed]

    # One spectrogram is returned per input text; only one text is used here.
    specs = synthesizer.synthesize_spectrograms(texts, embeds)
    spec = specs[0]

    # Generate the waveform from the spectrogram
    print("音频文件生成")
    generated_wav = vocoder.infer_waveform(spec, batched=False)
    # Append one second of silence so the tail of the utterance is not clipped.
    generated_wav = np.pad(generated_wav, (0, synthesizer.sample_rate), mode="constant")
    # Reuse the encoder preprocessing to normalize and trim excess silence.
    generated_wav = encoder.preprocess_wav(generated_wav)

    # Save the result as 32-bit float WAV at the synthesizer's sample rate.
    filename = "out.wav"
    print(generated_wav.dtype)
    sf.write(filename, generated_wav.astype(np.float32), synthesizer.sample_rate)
    


