# tts-koni / inference.py

import os
import json
import math
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
import commons
import utils
from data_utils import TextAudioLoader, TextAudioCollate, TextAudioSpeakerLoader, TextAudioSpeakerCollate
from models import SynthesizerTrn
from text.symbols import symbols
from text import text_to_sequence, cleaned_text_to_sequence
from text.cleaners import japanese_cleaners
from scipy.io.wavfile import write


def get_text(text, hps):
    """Convert raw text to a LongTensor of symbol ids using the cleaners from the config."""
    text_norm = text_to_sequence(text, hps.data.text_cleaners)
    if hps.data.add_blank:
        # Insert a blank token (id 0) between symbols, matching the training setup.
        text_norm = commons.intersperse(text_norm, 0)
    text_norm = torch.LongTensor(text_norm)
    return text_norm
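# A quick illustration of the blank-token interspersal (this assumes the standard
# VITS commons.intersperse, which places the blank id before, between, and after
# every symbol):
#     commons.intersperse([5, 9, 3], 0)  ->  [0, 5, 0, 9, 0, 3, 0]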


# Load hyperparameters and build the generator on the GPU.
hps = utils.get_hparams_from_file("/mnt/vits_koni/configs/japanese_base.json")
net_g = SynthesizerTrn(
    len(symbols),
    hps.data.filter_length // 2 + 1,                 # spectrogram channels
    hps.train.segment_size // hps.data.hop_length,   # segment length in frames
    **hps.model).cuda()
_ = net_g.eval()

# Restore generator weights only; no optimizer state is needed for inference.
_ = utils.load_checkpoint("/mnt/vits_koni/MyDrive/japanese_base/G_42000.pth", net_g, None)


def tts(text):
    """Synthesize `text` and return (sampling_rate, audio), with audio as a float32 numpy array."""
    if len(text) > 150:
        return "Error: Text is too long", None
    stn_tst = get_text(text, hps)
    with torch.no_grad():
        x_tst = stn_tst.cuda().unsqueeze(0)
        x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).cuda()
        # noise_scale / noise_scale_w control sampling variability; length_scale=2
        # roughly doubles the predicted durations, i.e. slower speech.
        audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667, noise_scale_w=0.8,
                            length_scale=2)[0][0, 0].data.cpu().float().numpy()
    return hps.data.sampling_rate, audio

# Quick smoke test: synthesize one utterance and write it to disk.
sampling_rate, infer_audio = tts("にーまーまーすーろーぁ")
write("/mnt/vits_koni/MyDrive/japanese_base/inferwav/konitest3.wav", sampling_rate, infer_audio)
print("1")