import sys, os
# Allow PyTorch to fall back to CPU for operators not implemented on Apple's MPS backend
if sys.platform == "darwin":
    os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
import logging
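# Quiet down chatty third-party loggers so the app's own INFO messages stay readable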
logging.getLogger("numba").setLevel(logging.WARNING)
logging.getLogger("markdown_it").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("matplotlib").setLevel(logging.WARNING)
logging.basicConfig(level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s")
logger = logging.getLogger(__name__)
import torch
import argparse
import commons
import utils
from models import SynthesizerTrn
from text.symbols import symbols
from text import cleaned_text_to_sequence, get_bert
from text.cleaner import clean_text
import gradio as gr
import webbrowser
import soundfile as sf
from datetime import datetime
import pytz
net_g = None
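
# Display name -> checkpoint path for the selectable voice models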
models = {
    "AdorableDarling": "./MODELS/adorabledarling.pth",
    "Silverleg": "./MODELS/silverhandG_4400.pth",
    "MoonLucidAloof": "./MODELS/G_2900.pth",
    "Rrabbitt": "./MODELS/rabbit4900.pth",
    "Mainlade": "./MODELS/DLM.pth",
}

def get_text(text, language_str, hps):
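    """Convert raw text into the tensor inputs the synthesizer expects.

    Cleans the text, maps it to phoneme/tone/language-ID sequences, optionally
    intersperses blank tokens, and fetches BERT features aligned to the phonemes.
    """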
    norm_text, phone, tone, word2ph = clean_text(text, language_str)
    phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)

    if hps.data.add_blank:
        phone = commons.intersperse(phone, 0)
        tone = commons.intersperse(tone, 0)
        language = commons.intersperse(language, 0)
        for i in range(len(word2ph)):
            word2ph[i] = word2ph[i] * 2
        word2ph[0] += 1

    bert = get_bert(norm_text, word2ph, language_str)
    del word2ph
    assert bert.shape[-1] == len(phone)

    phone = torch.LongTensor(phone)
    tone = torch.LongTensor(tone)
    language = torch.LongTensor(language)
    return bert, phone, tone, language

def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid, model_dir):
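    """Synthesize speech for `text` (treated as Chinese, "ZH") with the loaded model.

    Returns the waveform as a float numpy array and also writes it to tmp.wav
    so it can be transcoded to MP3 afterwards.
    """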
    global net_g
    bert, phones, tones, lang_ids = get_text(text, "ZH", hps)
    with torch.no_grad():
        x_tst = phones.to(device).unsqueeze(0)
        tones = tones.to(device).unsqueeze(0)
        lang_ids = lang_ids.to(device).unsqueeze(0)
        bert = bert.to(device).unsqueeze(0)
        x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
        del phones
        speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
        audio = net_g.infer(
            x_tst, x_tst_lengths, speakers, tones, lang_ids, bert,
            sdp_ratio=sdp_ratio, noise_scale=noise_scale,
            noise_scale_w=noise_scale_w, length_scale=length_scale,
        )[0][0, 0].data.cpu().float().numpy()
        del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers
    # Write at the model's configured sampling rate instead of a hard-coded 44100 Hz
    sf.write("tmp.wav", audio, hps.data.sampling_rate)
    return audio

def convert_wav_to_mp3(wav_file):
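    """Transcode the temporary WAV file to MP3 via ffmpeg and return the MP3 path."""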
    # Timestamp (Asia/Shanghai) in the filename keeps repeated generations from colliding
    tz = pytz.timezone('Asia/Shanghai')
    now = datetime.now(tz).strftime('%m%d%H%M%S')
    os.makedirs('out', exist_ok=True)
    output_path_mp3 = os.path.join('out', f"{now}.mp3")

    os.makedirs('in', exist_ok=True)
    renamed_input_path = os.path.join('in', "in.wav")
    os.rename(wav_file.name, renamed_input_path)

    command = ["ffmpeg", "-i", renamed_input_path, "-acodec", "libmp3lame", "-y", output_path_mp3]
    os.system(" ".join(command))
    return output_path_mp3

def tts_generator(text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale, model):
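    """Gradio callback: load the selected checkpoint, synthesize the text, and
    return a status message, the in-browser audio, and a downloadable MP3."""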
    global net_g
    # Reload the requested checkpoint into the shared synthesizer before inference
    model_path = models[model]
    net_g, _, _, _ = utils.load_checkpoint(model_path, net_g, None, skip_optimizer=True)
    with torch.no_grad():
        audio = infer(text, sdp_ratio=sdp_ratio, noise_scale=noise_scale,
                      noise_scale_w=noise_scale_w, length_scale=length_scale,
                      sid=speaker, model_dir=model)
    with open('tmp.wav', 'rb') as wav_file:
        mp3 = convert_wav_to_mp3(wav_file)
    # "生成语音成功" = "speech generated successfully"
    return "生成语音成功", (hps.data.sampling_rate, audio), mp3
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_dir", default="", help="path of your model")
    parser.add_argument("--config_dir", default="./configs/config.json", help="path of your config file")
    parser.add_argument("--share", action="store_true", help="make link public")
    parser.add_argument("-d", "--debug", action="store_true", help="enable DEBUG-level log")
    args = parser.parse_args()
    if args.debug:
        logger.info("Enable DEBUG-level log")
        # basicConfig is a no-op once handlers exist, so raise the root logger level directly
        logging.getLogger().setLevel(logging.DEBUG)

    hps = utils.get_hparams_from_file(args.config_dir)
    device = "cuda:0" if torch.cuda.is_available() else "cpu"

    net_g = SynthesizerTrn(
        len(symbols),
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model,
    ).to(device)
    _ = net_g.eval()

    speaker_ids = hps.data.spk2id
    speakers = list(speaker_ids.keys())

    with gr.Blocks() as app:
        with gr.Row():
            with gr.Column():
                gr.Markdown(value="""
测试用
""")  # "测试用" = "for testing"
                # info: inference runs on Hugging Face's free CPU tier, so it is slow;
                # keep inputs under 500 Chinese characters per request
                text = gr.TextArea(label="Text", placeholder="Input Text Here",
                                   value="在不在?能不能借给我三百块钱买可乐",
                                   info="使用huggingface的免费CPU进行推理,因此速度不快,一次性不要输入超过500汉字")
                model = gr.Radio(choices=list(models.keys()), value=list(models.keys())[0], label='音声模型')  # voice model
                #model = gr.Dropdown(choices=models, value=models[0], label='音声模型')
                speaker = gr.Radio(choices=speakers, value=speakers[0], label='Speaker')
                gr.Markdown("生成参数,效果玄学")  # generation parameters; results vary
                sdp_ratio = gr.Slider(minimum=0, maximum=1, value=0.2, step=0.01, label='语调变化')  # intonation variation
                noise_scale = gr.Slider(minimum=0.1, maximum=1.5, value=0.5, step=0.01, label='感情变化')  # emotional variation
                noise_scale_w = gr.Slider(minimum=0.1, maximum=1.4, value=0.9, step=0.01, label='音节长度')  # syllable length
                length_scale = gr.Slider(minimum=0.1, maximum=2, value=1, step=0.01, label='生成语音总长度')  # overall speech length
                btn = gr.Button("生成", variant="primary")  # generate
            with gr.Column():
                text_output = gr.Textbox(label="Message")
                audio_output = gr.Audio(label="试听")  # preview
                MP3_output = gr.File(label="下载")  # download

        btn.click(tts_generator,
                  inputs=[text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale, model],
                  outputs=[text_output, audio_output, MP3_output])

    app.launch(show_error=True, share=args.share)