import os

import gradio as gr
import numpy as np
import torch

import re_matching
import utils
from config import config
from infer import infer, latest_version, get_net_g
from tools.webui import reload_javascript

device = config.webui_config.device
if device == "mps":
    os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"


def speak_fn(
    text: str,
    exceed_flag,
    speaker="TalkFlower_CNzh",
    sdp_ratio=0.2,  # SDP/DP mix ratio
    noise_scale=0.6,  # emotion
    noise_scale_w=0.6,  # phoneme length
    length_scale=0.9,  # speaking rate
    language="ZH",
    interval_between_para=0.2,  # pause between paragraphs, in seconds
    interval_between_sent=1,  # pause between sentences, in seconds
):
    # Collapse runs of blank lines into single line breaks.
    while text.find("\n\n") != -1:
        text = text.replace("\n\n", "\n")

    if len(text) > 100:
        print(f"Too Long Text: {text}")
        gr.Warning("Too long! No more than 100 characters in one breath; you'll wear me out.")
        if exceed_flag:
            return gr.update(value="./assets/audios/nomorethan100.wav", autoplay=True), False
        else:
            return gr.update(value="./assets/audios/overlength.wav", autoplay=True), True

    audio_list = []
    if len(text) > 42:
        print(f"Long Text: {text}")
        para_list = re_matching.cut_para(text)
        with torch.no_grad():
            for p in para_list:
                audio_list_sent = []
                sent_list = re_matching.cut_sent(p)
                for s in sent_list:
                    audio = infer(
                        s,
                        sdp_ratio=sdp_ratio,
                        noise_scale=noise_scale,
                        noise_scale_w=noise_scale_w,
                        length_scale=length_scale,
                        sid=speaker,
                        language=language,
                        hps=hps,
                        net_g=net_g,
                        device=device,
                    )
                    audio_list_sent.append(audio)
                    # Use the model's sampling rate so silence length matches
                    # the audio returned at the end of this function.
                    silence = np.zeros(int(hps.data.sampling_rate * interval_between_sent))
                    audio_list_sent.append(silence)
                if (interval_between_para - interval_between_sent) > 0:
                    silence = np.zeros(
                        int(hps.data.sampling_rate * (interval_between_para - interval_between_sent))
                    )
                    audio_list_sent.append(silence)
                # Volume-normalize the assembled paragraph as a whole.
                audio16bit = gr.processing_utils.convert_to_16_bit_wav(
                    np.concatenate(audio_list_sent)
                )
                audio_list.append(audio16bit)
    else:
        print(f"Short Text: {text}")
        silence = np.zeros(hps.data.sampling_rate // 2, dtype=np.int16)
        with torch.no_grad():
            for piece in text.split("|"):
                audio = infer(
                    piece,
                    sdp_ratio=sdp_ratio,
                    noise_scale=noise_scale,
                    noise_scale_w=noise_scale_w,
                    length_scale=length_scale,
                    sid=speaker,
                    language=language,
                    hps=hps,
                    net_g=net_g,
                    device=device,
                )
                audio16bit = gr.processing_utils.convert_to_16_bit_wav(audio)
                audio_list.append(audio16bit)
                audio_list.append(silence)  # append silence after each piece

    audio_concat = np.concatenate(audio_list)
    return (hps.data.sampling_rate, audio_concat), exceed_flag
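
# ---------------------------------------------------------------------------
# Sketch: programmatic synthesis without the Gradio UI, e.g. for a quick
# smoke test from a Python shell. Not part of the original app: the helper
# name, default output path, and parameter values are illustrative, and it
# assumes the module-level globals `hps` and `net_g` set in the __main__
# block below have been initialized.
# ---------------------------------------------------------------------------
def synthesize_to_wav(text: str, path: str = "out.wav", speaker: str = "TalkFlower_CNzh"):
    """Run one inference pass and write 16-bit mono PCM to a WAV file."""
    import wave

    with torch.no_grad():
        audio = infer(
            text,
            sdp_ratio=0.2,
            noise_scale=0.6,
            noise_scale_w=0.6,
            length_scale=0.9,
            sid=speaker,
            language="ZH",
            hps=hps,
            net_g=net_g,
            device=device,
        )
    pcm = gr.processing_utils.convert_to_16_bit_wav(audio)
    with wave.open(path, "wb") as f:
        f.setnchannels(1)  # the model emits mono audio
        f.setsampwidth(2)  # 16-bit samples
        f.setframerate(hps.data.sampling_rate)
        f.writeframes(pcm.tobytes())
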
def init_fn():
    gr.Info("2023-11-23: Paid for a slightly better server; generation should be faster now.")
    gr.Info("2023-11-24: Improved long-sentence generation; added examples; updated a few small Easter eggs.")
    gr.Info("Only Chinese is supported for now. Trying to train a multilingual model.")


with open("./css/style.css", "r", encoding="utf-8") as f:
    customCSS = f.read()

with gr.Blocks(css=customCSS) as demo:
    exceed_flag = gr.State(value=False)
    talkingFlowerPic = gr.HTML("""TalkingFlowerPic""", elem_id="talking_flower_pic")
    input_text = gr.Textbox(
        lines=1,
        label="Talking Flower will say:",
        elem_classes="wonder-card",
        elem_id="input_text",
    )
    speak_button = gr.Button("Speak!", elem_id="speak_button", elem_classes="button wonder-card")
    # Chinese sample prompts, kept verbatim: the model currently speaks Chinese only.
    gr.Examples(
        [
            "你今天好棒",
            "雄蕊羊痒的",
            "我一朵花好害怕",
            "再来找我玩哦",
            "冲呀冲呀!",
            "塔塔开!一字摸塔塔开!",
            "好了是闺蜜,不好嘞是敌咪",
        ],
        label=None,
        inputs=[input_text],
        elem_id="examples",
    )
    audio_output = gr.Audio(
        label="Output audio",
        show_label=False,
        autoplay=True,
        elem_id="audio_output",
        elem_classes="wonder-card",
    )

    demo.load(init_fn, inputs=[], outputs=[])
    input_text.submit(
        speak_fn,
        inputs=[input_text, exceed_flag],
        outputs=[audio_output, exceed_flag],
    )
    speak_button.click(
        speak_fn,
        inputs=[input_text, exceed_flag],
        outputs=[audio_output, exceed_flag],
    )


if __name__ == "__main__":
    # These module-level globals are read by speak_fn at request time.
    hps = utils.get_hparams_from_file(config.webui_config.config_path)
    version = hps.version if hasattr(hps, "version") else latest_version
    net_g = get_net_g(
        model_path=config.webui_config.model,
        version=version,
        device=device,
        hps=hps,
    )
    reload_javascript()
    demo.launch(
        allowed_paths=["./assets"],
        show_api=False,
        # server_name=server_name,
        # server_port=server_port,
        inbrowser=True,
    )
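
    # Sketch (not original code): to bind an explicit host/port instead of
    # Gradio's defaults, the commented-out parameters above could be filled
    # in, e.g. as below. This assumes `config.webui_config` exposes a `port`
    # field, as other Bert-VITS2 webui configs do; "0.0.0.0" is a
    # hypothetical choice for serving on all interfaces.
    #
    #     demo.launch(
    #         allowed_paths=["./assets"],
    #         show_api=False,
    #         server_name="0.0.0.0",
    #         server_port=config.webui_config.port,
    #         inbrowser=True,
    #     )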