# Pretrained model download URLs:
# https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_baker_ckpt_0.4.zip
# https://paddlespeech.bj.bcebos.com/Parakeet/released_models/pwgan/pwg_baker_ckpt_0.4.zip

# Acoustic model (text -> mel spectrogram)
# The code below shows how to use the FastSpeech2 model. After loading the pretrained
# checkpoint, build an inference object from the model and a normalizer, then call
# fastspeech2_inference(phone_ids) to generate a mel spectrogram. The spectrogram is
# later fed to a vocoder to synthesize raw audio.

from pathlib import Path
import numpy as np
import paddle
import yaml
from yacs.config import CfgNode
from paddlespeech.t2s.models.fastspeech2 import FastSpeech2
from paddlespeech.t2s.models.fastspeech2 import FastSpeech2Inference
from paddlespeech.t2s.modules.normalizer import ZScore
# examples/fastspeech2/baker/frontend.py
from paddlespeech.t2s.frontend.zh_frontend import Frontend

# --- Load the pretrained FastSpeech2 acoustic model ---------------------------
checkpoint_dir = Path("F:/vs_code/file/fastspeech2_nosil_baker_ckpt_0.4")

# Phone -> id mapping; the vocabulary size is the number of distinct phones.
with open(checkpoint_dir / "phone_id_map.txt", "r", encoding="utf-8") as f:
    phn_id = [line.strip().split() for line in f]
vocab_size = len(phn_id)

# Model hyper-parameters shipped alongside the checkpoint.
with open(checkpoint_dir / "default.yaml", encoding="utf-8") as f:
    fastspeech2_config = CfgNode(yaml.safe_load(f))

odim = fastspeech2_config.n_mels
model = FastSpeech2(
    idim=vocab_size, odim=odim, **fastspeech2_config["model"])
# FIX: reuse checkpoint_dir instead of a second hard-coded copy of the path.
model.set_state_dict(
    paddle.load(str(checkpoint_dir / "snapshot_iter_76000.pdz"))["main_params"])
model.eval()

# Mean/std statistics used to de-normalize the predicted mel (ZScore).
mu, std = np.load(checkpoint_dir / "speech_stats.npy")
mu = paddle.to_tensor(mu)
std = paddle.to_tensor(std)
fastspeech2_normalizer = ZScore(mu, std)

# Inference wrapper: normalizer + acoustic model.
fastspeech2_inference = FastSpeech2Inference(fastspeech2_normalizer, model)

# Chinese text frontend: converts a sentence into phone-id sequences
# (one sequence per sentence segment).
frontend = Frontend(phone_vocab_path=checkpoint_dir / "phone_id_map.txt",
                    tone_vocab_path=None)

sentence = "你好吗,我很好"
input_ids = frontend.get_input_ids(sentence, merge_sentences=True)
phone_ids = input_ids["phone_ids"]

# Synthesize one mel spectrogram per segment, then concatenate along time.
# FIX: collect segments in a list and concat once (the old flags/mel pattern
# left `mel` undefined for empty input and re-concatenated every iteration);
# no_grad() is hoisted outside the loop since no gradients are ever needed.
mel_parts = []
with paddle.no_grad():
    for part_phone_ids in phone_ids:
        mel_parts.append(fastspeech2_inference(part_phone_ids))
mel = paddle.concat(mel_parts) if mel_parts else None





# Vocoder (mel spectrogram -> waveform)
# The code below shows how to use the Parallel WaveGAN model. As in the example above,
# after loading the pretrained checkpoint, build an inference object from the model and
# a normalizer, then call pwg_inference(mel) to generate raw audio (WAV format).


from pathlib import Path
import numpy as np
import paddle
import soundfile as sf
import yaml
from yacs.config import CfgNode
from paddlespeech.t2s.models.parallel_wavegan import PWGGenerator
from paddlespeech.t2s.models.parallel_wavegan import PWGInference
from paddlespeech.t2s.modules.normalizer import ZScore

# --- Load the pretrained Parallel WaveGAN vocoder -----------------------------
checkpoint_dir = Path("F:/vs_code/file/pwg_baker_ckpt_0.4")
with open(checkpoint_dir / "pwg_default.yaml", encoding="utf-8") as f:
    pwg_config = CfgNode(yaml.safe_load(f))
vocoder = PWGGenerator(**pwg_config["generator_params"])
# FIX: reuse checkpoint_dir instead of a second hard-coded copy of the path.
vocoder.set_state_dict(
    paddle.load(str(checkpoint_dir / "pwg_snapshot_iter_400000.pdz")))
vocoder.remove_weight_norm()  # inference-only: fold weight norm into the weights
vocoder.eval()

# Mean/std statistics for mel normalization (ZScore).
mu, std = np.load(checkpoint_dir / "pwg_stats.npy")
mu = paddle.to_tensor(mu)
std = paddle.to_tensor(std)
pwg_normalizer = ZScore(mu, std)

# Inference wrapper: normalizer + generator.
pwg_inference = PWGInference(pwg_normalizer, vocoder)

# Mel spectrogram -> waveform.
# FIX: ensure the output directory exists before writing, and run the vocoder
# under no_grad() for consistency with the acoustic-model pass above.
output_path = Path("F:/vs_code/paddlespeech_study/output/am01.wav")
output_path.parent.mkdir(parents=True, exist_ok=True)
with paddle.no_grad():
    wav = pwg_inference(mel)
sf.write(
    str(output_path),
    wav.numpy(),
    samplerate=fastspeech2_config.fs)


