from synthesizer.inference import Synthesizer
from encoder import inference as encoder
from vocoder.hifigan import inference as gan_vocoder
from pathlib import Path
import numpy as np
import soundfile as sf
import os 
if __name__ == "__main__":

    # Silence inserted between synthesized utterances, in seconds.
    PAUSE_SECONDS = 0.15
    # Target peak amplitude after normalization (headroom below 1.0 avoids clipping).
    PEAK_AMPLITUDE = 0.97

    # --- Load models ---
    path_encoder = os.path.join('encoder', 'saved_models', 'pretrained.pt')
    path_synthesizer = os.path.join("models", 'pretrained-11-7-21_75k.pt')
    path_vocoder = os.path.join('vocoder', 'saved_models', 'pretrained', 'g_hifigan.pt')

    encoder.load_model(Path(path_encoder))
    synthesizer = Synthesizer(Path(path_synthesizer))
    vocoder = gan_vocoder
    vocoder.load_model(Path(path_vocoder))

    # --- Speaker reference audio ---
    spk_wav = "male.wav"
    # Preprocess the reference wav (resampling / trimming) for the encoder.
    encoder_wav = synthesizer.load_preprocess_wav(spk_wav)
    # Compute the speaker embedding; partial embeddings are not needed here.
    embed, _, _ = encoder.embed_utterance(encoder_wav, return_partials=True)

    texts = ["欢迎来到鲁东大学","鲁东大学欢迎你"]

    # One copy of the same speaker embedding per text to synthesize.
    embeds = [embed] * len(texts)

    # Synthesize one mel spectrogram per text, remember each one's frame
    # count, then concatenate them along the time axis for a single
    # vocoder pass.
    specs = synthesizer.synthesize_spectrograms(texts, embeds)
    frame_counts = [spec.shape[1] for spec in specs]
    spec = np.concatenate(specs, axis=1)

    # --- Generate the waveform ---
    generated_wav, output_sample_rate = vocoder.infer_waveform(spec)

    # --- Re-insert pauses between utterances ---
    # Convert spectrogram frame counts to sample offsets via the hop size.
    b_ends = np.cumsum(np.array(frame_counts) * synthesizer.hparams.hop_size)
    b_starts = np.concatenate(([0], b_ends[:-1]))
    wavs = [generated_wav[start:end] for start, end in zip(b_starts, b_ends)]
    silences = [np.zeros(int(PAUSE_SECONDS * synthesizer.sample_rate))] * len(frame_counts)
    generated_wav = np.concatenate([seg for w, s in zip(wavs, silences) for seg in (w, s)])

    # Trim leading/trailing silence.
    generated_wav = encoder.preprocess_wav(generated_wav)

    # Normalize the amplitude. Guard against an all-zero signal (e.g. the
    # silence trim removed everything) to avoid a divide-by-zero producing
    # NaN/inf samples in the output file.
    peak = np.abs(generated_wav).max()
    if peak > 0:
        generated_wav = generated_wav / peak * PEAK_AMPLITUDE

    # --- Write the result ---
    filename = "out.wav"
    sf.write(filename, generated_wav, synthesizer.sample_rate)