import sys, os sys.path.append('BV2') import torch import argparse import BV2.commons import BV2.utils from BV2.models import Synthesizer from BV2.text.symbols import symbols from BV2.text import cleaned_text_to_sequence, get_bert from BV2.text.cleaner import clean_text import gradio as gr import soundfile as sf from datetime import datetime import pytz tz = pytz.timezone('Asia/Shanghai') net_g = None models = { "Mellowdear": "./BV2/MODELS/adorabledarling.pth", "MistyNikki": "./BV2/MODELS/nikki9400.pth", "Silverleg": "./BV2/MODELS/J8900.pth", "Xelo": "./BV2/MODELS/HER_1100.pth", "Rrabbitt": "./BV2/MODELS/rabbit4900.pth", "VVV": "./BV2/MODELS/v3.pth", "AlluWin": "./BV2/MODELS/AW.pth", "Hypnosia": "./BV2/MODELS/hypno.pth", "PremJ": "./BV2/MODELS/premj.pth", "Umemura": "./BV2/MODELS/take2.pth", "ArasakaAI": "./BV2/MODELS/Arasaka.pth", "Terra": "./BV2/MODELS/TERRA.pth", } def get_text(text, language_str, hps): norm_text, phone, tone, word2ph = clean_text(text, language_str) phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) if hps.data.add_blank: phone = BV2.commons.intersperse(phone, 0) tone = BV2.commons.intersperse(tone, 0) language = BV2.commons.intersperse(language, 0) for i in range(len(word2ph)): word2ph[i] = word2ph[i] * 2 word2ph[0] += 1 bert = get_bert(norm_text, word2ph, language_str) del word2ph assert bert.shape[-1] == len(phone) phone = torch.LongTensor(phone) tone = torch.LongTensor(tone) language = torch.LongTensor(language) return bert, phone, tone, language def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid, model_dir): global net_g bert, phones, tones, lang_ids = get_text(text, "ZH", HPS) with torch.no_grad(): x_tst=phones.to(devicee).unsqueeze(0) tones=tones.to(devicee).unsqueeze(0) lang_ids=lang_ids.to(devicee).unsqueeze(0) bert = bert.to(devicee).unsqueeze(0) x_tst_lengths = torch.LongTensor([phones.size(0)]).to(devicee) del phones speakers = torch.LongTensor([HPS.data.spk2id[sid]]).to(devicee) audio = net_g.infer(x_tst, x_tst_lengths, speakers, tones, lang_ids, bert, sdp_ratio=sdp_ratio , noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0,0].data.cpu().float().numpy() del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers sf.write("tmp.wav", audio, 44100) return audio def convert_wav_to_mp3(wav_file): now = datetime.now(tz).strftime('%m%d%H%M%S') os.makedirs('out', exist_ok=True) output_path_mp3 = os.path.join('out', f"{now}.mp3") renamed_input_path = os.path.join('in', f"in.wav") os.makedirs('in', exist_ok=True) os.rename(wav_file.name, renamed_input_path) command = ["ffmpeg", "-i", renamed_input_path, "-acodec", "libmp3lame", "-y", output_path_mp3] os.system(" ".join(command)) return output_path_mp3 def tts_generator(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, model): global net_g,speakers,tz now = datetime.now(tz).strftime('%m-%d %H:%M:%S') model_path = models[model] net_g, _, _, _ = BV2.utils.load_checkpoint(model_path, net_g, None, skip_optimizer=True) print(f'✨{now}-开始生成:{text}') try: with torch.no_grad(): audio = infer(text, sdp_ratio=sdp_ratio, noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale, sid=speaker,model_dir=model) with open('tmp.wav', 'rb') as wav_file: mp3 = convert_wav_to_mp3(wav_file) return "生成语音成功", (HPS.data.sampling_rate, audio), mp3 except Exception as e: return "生成语音失败:" + str(e), None, None current_dir = os.path.dirname(os.path.abspath(__file__)) config_path = os.path.join(current_dir, "BV2/configs/config.json") if __name__ == "__main__": HPS = BV2.utils.get_hparams_from_file(config_path) devicee = "cuda:0" if torch.cuda.is_available() else "cpu" net_g = Synthesizer( len(symbols), HPS.data.filter_length // 2 + 1, HPS.train.segment_size // HPS.data.hop_length, n_speakers=HPS.data.n_speakers, **HPS.model).to(devicee) _ = net_g.eval() speaker_ids = HPS.data.spk2id speaker = list(speaker_ids.keys())[0] theme='remilia/Ghostly' with gr.Blocks(theme=theme) as app: with gr.Column(): with gr.Column(): gr.HTML('''

Use English to generate, please go to this SPACE

日本語で生成するために、こちらへ進んでください。

''') gr.HTML('''

以下仅供测试用,质量参差Only read Chinese

模型训练以及推理基于开源项目Bert-VITS2 (具体使用的是9月份的版本,可能后续项目效果更好,请自行尝试训练)

''') text = gr.TextArea(label="输入需要生成语音的文字(标点也会影响语气)", placeholder="输入文字", value="今天拿白金了吗", info="使用huggingface的免费CPU进行推理,因此速度不快,一次性不要输入超过500汉字。字数越多,生成速度越慢,请耐心等待,只会说中文。", ) model = gr.Radio(choices=list(models.keys()), value=list(models.keys())[0], label='声音模型') with gr.Accordion(label="展开设置生成参数", open=False): sdp_ratio = gr.Slider(minimum=0, maximum=1, value=0.2, step=0.01, label='SDP/DP混合比',info='可控制一定程度的语调变化') noise_scale = gr.Slider(minimum=0.1, maximum=1.5, value=0.5, step=0.01, label='感情变化') noise_scale_w = gr.Slider(minimum=0.1, maximum=1.4, value=0.9, step=0.01, label='音节长度') length_scale = gr.Slider(minimum=0.1, maximum=2, value=1, step=0.01, label='生成语音总长度',info='数值越大,语速越慢') btn = gr.Button("✨生成", variant="primary") with gr.Column(): audio_output = gr.Audio(label="试听") MP3_output = gr.File(label="💾下载") text_output = gr.Textbox(label="调试信息") gr.Markdown(""" """) btn.click( tts_generator, inputs=[text, sdp_ratio, noise_scale, noise_scale_w, length_scale, model], outputs=[text_output, audio_output,MP3_output] ) gr.HTML('''
visitor badge
''') app.launch(share=True)