"""Gradio inference webui for a BangDream Bert-VITS2 Japanese TTS model."""

import os
import logging

logging.getLogger("numba").setLevel(logging.WARNING)
logging.getLogger("markdown_it").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("matplotlib").setLevel(logging.WARNING)
logging.basicConfig(
    level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s"
)
logger = logging.getLogger(__name__)

import librosa
import numpy as np
import torch
import gradio as gr

import commons
import utils
from clap_wrapper import get_clap_audio_feature, get_clap_text_feature
from models import SynthesizerTrn
from text import cleaned_text_to_sequence, get_bert
from text.cleaner import clean_text
from text.symbols import symbols

net_g = None

# Automatic device selection, kept for reference; CPU is forced below.
# device = (
#     "cuda:0"
#     if torch.cuda.is_available()
#     else (
#         "mps"
#         if sys.platform == "darwin" and torch.backends.mps.is_available()
#         else "cpu"
#     )
# )
device = "cpu"

BandList = {
    "PoppinParty": ["香澄", "有咲", "たえ", "りみ", "沙綾"],
    "Afterglow": ["蘭", "モカ", "ひまり", "巴", "つぐみ"],
    "HelloHappyWorld": ["こころ", "美咲", "薫", "花音", "はぐみ"],
    "PastelPalettes": ["彩", "日菜", "千聖", "イヴ", "麻弥"],
    "Roselia": ["友希那", "紗夜", "リサ", "燐子", "あこ"],
    "RaiseASuilen": ["レイヤ", "ロック", "ますき", "チュチュ", "パレオ"],
    "Morfonica": ["ましろ", "瑠唯", "つくし", "七深", "透子"],
    "MyGo": ["燈", "愛音", "そよ", "立希", "楽奈"],
    "AveMujica": ["祥子", "睦", "海鈴", "にゃむ", "初華"],
    "圣翔音乐学园": ["華戀", "光", "香子", "雙葉", "真晝", "純那", "克洛迪娜", "真矢", "奈奈"],
    "凛明馆女子学校": ["珠緒", "壘", "文", "悠悠子", "一愛"],
    "弗隆提亚艺术学校": ["艾露", "艾露露", "菈樂菲", "司", "靜羽"],
    "西克菲尔特音乐学院": ["晶", "未知留", "八千代", "栞", "美帆"],
}


def get_net_g(model_path: str, device: str, hps):
    # Build the current-version synthesizer and load its checkpoint weights.
    net_g = SynthesizerTrn(
        len(symbols),
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model,
    ).to(device)
    _ = net_g.eval()
    _ = utils.load_checkpoint(model_path, net_g, None, skip_optimizer=True)
    return net_g


def get_text(text, language_str, hps, device, style_text=None, style_weight=0.7):
    # Text front end for the current model version.
    style_text = None if style_text == "" else style_text
    norm_text, phone, tone, word2ph = clean_text(text, language_str)
    phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)

    if hps.data.add_blank:
        phone = commons.intersperse(phone, 0)
        tone = commons.intersperse(tone, 0)
        language = commons.intersperse(language, 0)
        for i in range(len(word2ph)):
            word2ph[i] = word2ph[i] * 2
        word2ph[0] += 1
    bert = get_bert(norm_text, word2ph, language_str, device, style_text, style_weight)
    del word2ph
    assert bert.shape[-1] == len(
        phone
    ), f"Bert seq len {bert.shape[-1]} != {len(phone)}"

    phone = torch.LongTensor(phone)
    tone = torch.LongTensor(tone)
    language = torch.LongTensor(language)
    return bert, phone, tone, language


def infer(
    text,
    sdp_ratio,
    noise_scale,
    noise_scale_w,
    length_scale,
    sid,
    emotion,
    reference_audio=None,
    skip_start=False,
    skip_end=False,
    style_text=None,
    style_weight=0.7,
):
    language = "JP"
    if isinstance(reference_audio, str):
        # gr.Audio(type="filepath") hands us a path; load it for the CLAP
        # audio encoder (assumes the CLAP wrapper expects 48 kHz input).
        reference_audio, _ = librosa.load(reference_audio, sr=48000)
    if isinstance(reference_audio, np.ndarray):
        emo = get_clap_audio_feature(reference_audio, device)
    else:
        emo = get_clap_text_feature(emotion, device)
    emo = torch.squeeze(emo, dim=1)

    bert, phones, tones, lang_ids = get_text(
        text,
        language,
        hps,
        device,
        style_text=style_text,
        style_weight=style_weight,
    )
    if skip_start:
        phones = phones[3:]
        tones = tones[3:]
        lang_ids = lang_ids[3:]
        bert = bert[:, 3:]
    if skip_end:
        phones = phones[:-2]
        tones = tones[:-2]
        lang_ids = lang_ids[:-2]
        bert = bert[:, :-2]
    with torch.no_grad():
        x_tst = phones.to(device).unsqueeze(0)
        tones = tones.to(device).unsqueeze(0)
        lang_ids = lang_ids.to(device).unsqueeze(0)
        bert = bert.to(device).unsqueeze(0)
        x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
        emo = emo.to(device).unsqueeze(0)
        del phones
        speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
        logger.info(text)
        audio = (
            net_g.infer(
                x_tst,
                x_tst_lengths,
                speakers,
                tones,
                lang_ids,
                bert,
                emo,
                sdp_ratio=sdp_ratio,
                noise_scale=noise_scale,
                noise_scale_w=noise_scale_w,
                length_scale=length_scale,
            )[0][0, 0]
            .data.cpu()
            .float()
            .numpy()
        )
        del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers, emo
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        return (
            hps.data.sampling_rate,
            gr.processing_utils.convert_to_16_bit_wav(audio),
        )


def loadmodel(model):
    # Hot-swap checkpoint weights into the already-built synthesizer.
    _ = net_g.eval()
    _ = utils.load_checkpoint(model, net_g, None, skip_optimizer=True)
    return "success"


if __name__ == "__main__":
    modelPaths = []
    for dirpath, dirnames, filenames in os.walk("Data/BangDream/models/"):
        for filename in filenames:
            modelPaths.append(os.path.join(dirpath, filename))
    hps = utils.get_hparams_from_file("Data/BangDream/config.json")
    # Load the last checkpoint os.walk finds.
    net_g = get_net_g(model_path=modelPaths[-1], device=device, hps=hps)
    speaker_ids = hps.data.spk2id
    speakers = list(speaker_ids.keys())
    with gr.Blocks() as app:
        for band in BandList:
            with gr.TabItem(band):
                for name in BandList[band]:
                    with gr.TabItem(name):
                        with gr.Row():
                            with gr.Column():
                                with gr.Row():
                                    gr.Markdown(
                                        # Per-character portrait; the image path here is a
                                        # placeholder, point it at your own assets.
                                        '<div align="center">'
                                        f'<img style="width:auto;height:400px;" src="image/{name}.png">'
                                        '</div>'
                                    )
                                length_scale = gr.Slider(
                                    minimum=0.1, maximum=2, value=1, step=0.01, label="Speed"
                                )
                                emotion = gr.Textbox(
                                    label="Emotion prompt text",
                                    value="なんではるひかげやったの?!!",
                                )
                                style_weight = gr.Slider(
                                    minimum=0.1,
                                    maximum=2,
                                    value=1,
                                    step=0.01,
                                    label="Style weight",
                                )
                                with gr.Accordion(label="Advanced settings", open=False):
                                    sdp_ratio = gr.Slider(
                                        minimum=0,
                                        maximum=1,
                                        value=0.2,
                                        step=0.01,
                                        label="SDP/DP mix ratio",
                                    )
                                    noise_scale = gr.Slider(
                                        minimum=0.1,
                                        maximum=2,
                                        value=0.6,
                                        step=0.01,
                                        label="Emotion scale",
                                    )
                                    noise_scale_w = gr.Slider(
                                        minimum=0.1,
                                        maximum=2,
                                        value=0.8,
                                        step=0.01,
                                        label="Phoneme length scale",
                                    )
                                    speaker = gr.Dropdown(
                                        choices=speakers, value=name, label="Speaker"
                                    )
                                    skip_start = gr.Checkbox(label="Skip start")
                                    skip_end = gr.Checkbox(label="Skip end")
                                with gr.Accordion(label="Switch model", open=False):
                                    modelstrs = gr.Dropdown(
                                        label="Model",
                                        choices=modelPaths,
                                        value=modelPaths[-1],  # match the checkpoint loaded at startup
                                        type="value",
                                    )
                                    btnMod = gr.Button("Load model")
                                    statusa = gr.TextArea()
                                    btnMod.click(
                                        loadmodel, inputs=[modelstrs], outputs=[statusa]
                                    )
                            with gr.Column():
                                text = gr.TextArea(
                                    label="Japanese text only",
                                    placeholder="Enter Japanese text",
                                    value="なんではるひかげやったの?!!",
                                )
                                reference_audio = gr.Audio(
                                    label="Emotion reference audio", type="filepath"
                                )
                                btn = gr.Button("Generate", variant="primary")
                                audio_output = gr.Audio(label="Output Audio")
                                btn.click(
                                    infer,
                                    inputs=[
                                        text,
                                        sdp_ratio,
                                        noise_scale,
                                        noise_scale_w,
                                        length_scale,
                                        speaker,
                                        emotion,
                                        reference_audio,
                                        skip_start,
                                        skip_end,
                                        emotion,  # reused as style_text for the BERT feature
                                        style_weight,
                                    ],
                                    outputs=[audio_output],
                                )

    print("Inference page is up!")
    app.launch(share=True)
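# Usage sketch (assumptions: checkpoints live under Data/BangDream/models/ and a
# matching config sits at Data/BangDream/config.json, as the paths above require;
# the script filename below is illustrative):
#
#   $ python webui.py
#
# The script loads the last checkpoint os.walk finds, builds one tab per band and
# character, and serves the Gradio page locally plus a public share link.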