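"""Gradio demo app for "chupa" mouth-sound synthesis with Style-Bert-VITS2."""
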
import datetime
import random
from pathlib import Path

import gradio as gr

from style_bert_vits2.constants import (
    DEFAULT_LENGTH,
    DEFAULT_LINE_SPLIT,
    DEFAULT_NOISE,
    DEFAULT_NOISEW,
    DEFAULT_SPLIT_INTERVAL,
)
from style_bert_vits2.logging import logger
from style_bert_vits2.models.infer import InvalidToneError
from style_bert_vits2.nlp.japanese import pyopenjtalk_worker as pyopenjtalk
from style_bert_vits2.tts_model import TTSModelHolder

# Start the worker process that pyopenjtalk uses for Japanese text processing.
pyopenjtalk.initialize_worker()

example_file = "chupa_examples.txt"
initial_text = (
    "ちゅぱ、ちゅるる、ぢゅ、んく、れーれゅれろれろれろ、じゅぽぽぽぽぽ……ちゅううう!"
)

# Example lines shown by the "例から選ぶ 🎲" button.
with open(example_file, "r", encoding="utf-8") as f:
    examples = f.read().splitlines()


def get_random_text() -> str:
    return random.choice(examples)


initial_md = """
# チュパ音合成デモ
2024-07-07: initial ver
"""


# Enable the synthesis button (used after a model has been loaded).
def make_interactive():
    return gr.update(interactive=True, value="音声合成")


# Disable the synthesis button until a model is loaded.
def make_non_interactive():
    return gr.update(interactive=False, value="音声合成(モデルをロードしてください)")


def gr_util(item):
    if item == "プリセットから選ぶ":
        return (gr.update(visible=True), gr.Audio(visible=False, value=None))
    else:
        return (gr.update(visible=False), gr.update(visible=True))


def create_inference_app(model_holder: TTSModelHolder) -> gr.Blocks:
    def tts_fn(
        model_name,
        model_path,
        text,
        language,
        sdp_ratio,
        noise_scale,
        noise_scale_w,
        length_scale,
        line_split,
        split_interval,
        speaker,
    ):
        # Make sure the selected model is the one currently loaded, then synthesize.
        model_holder.get_model(model_name, model_path)
        assert model_holder.current_model is not None
        speaker_id = model_holder.current_model.spk2id[speaker]
        start_time = datetime.datetime.now()
        try:
            sr, audio = model_holder.current_model.infer(
                text=text,
                language=language,
                sdp_ratio=sdp_ratio,
                noise=noise_scale,
                noise_w=noise_scale_w,
                length=length_scale,
                line_split=line_split,
                split_interval=split_interval,
                speaker_id=speaker_id,
            )
        except InvalidToneError as e:
            logger.error(f"Tone error: {e}")
            return f"Error: アクセント指定が不正です:\n{e}", None
        except ValueError as e:
            logger.error(f"Value error: {e}")
            return f"Error: {e}", None
        end_time = datetime.datetime.now()
        duration = (end_time - start_time).total_seconds()
        message = f"Success, time: {duration} seconds."
        return message, (sr, audio)

    def get_model_files(model_name: str):
        return [str(f) for f in model_holder.model_files_dict[model_name]]

    model_names = model_holder.model_names
    if len(model_names) == 0:
        logger.error(
            f"モデルが見つかりませんでした。{model_holder.root_dir}にモデルを置いてください。"
        )
        with gr.Blocks() as app:
            gr.Markdown(
                f"Error: モデルが見つかりませんでした。{model_holder.root_dir}にモデルを置いてください。"
            )
        return app

    initial_pth_files = get_model_files(model_names[0])
    model = model_holder.get_model(model_names[0], initial_pth_files[0])
    speakers = list(model.spk2id.keys())

    with gr.Blocks(theme="ParityError/Anime") as app:
        gr.Markdown(initial_md)
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    with gr.Column(scale=3):
                        model_name = gr.Dropdown(
                            label="モデル一覧",
                            choices=model_names,
                            value=model_names[0],
                        )
                        model_path = gr.Dropdown(
                            label="モデルファイル",
                            choices=initial_pth_files,
                            value=initial_pth_files[0],
                        )
                    refresh_button = gr.Button("更新", scale=1, visible=False)
                    load_button = gr.Button("ロード", scale=1, variant="primary")
                with gr.Row():
                    text_input = gr.TextArea(
                        label="テキスト", value=initial_text, scale=3
                    )
                    random_button = gr.Button("例から選ぶ 🎲", scale=1)
                random_button.click(get_random_text, outputs=[text_input])
                with gr.Row():
                    length_scale = gr.Slider(
                        minimum=0.1,
                        maximum=2,
                        value=DEFAULT_LENGTH,
                        step=0.1,
                        label="生成音声の長さ(Length)",
                    )
                    sdp_ratio = gr.Slider(
                        minimum=0,
                        maximum=1,
                        value=1,
                        step=0.1,
                        label="SDP Ratio",
                    )
                line_split = gr.Checkbox(
                    label="改行で分けて生成(分けたほうが感情が乗ります)",
                    value=DEFAULT_LINE_SPLIT,
                    visible=False,
                )
                split_interval = gr.Slider(
                    minimum=0.0,
                    maximum=2,
                    value=DEFAULT_SPLIT_INTERVAL,
                    step=0.1,
                    label="改行ごとに挟む無音の長さ(秒)",
                )
                # Only show the silence-interval slider when line splitting is enabled.
                line_split.change(
                    lambda x: (gr.Slider(visible=x)),
                    inputs=[line_split],
                    outputs=[split_interval],
                )
                language = gr.Dropdown(
                    choices=["JP"], value="JP", label="Language", visible=False
                )
                speaker = gr.Dropdown(label="話者", choices=speakers, value=speakers[0])
                with gr.Accordion(label="詳細設定", open=True):
                    noise_scale = gr.Slider(
                        minimum=0.1,
                        maximum=2,
                        value=DEFAULT_NOISE,
                        step=0.1,
                        label="Noise",
                    )
                    noise_scale_w = gr.Slider(
                        minimum=0.1,
                        maximum=2,
                        value=DEFAULT_NOISEW,
                        step=0.1,
                        label="Noise_W",
                    )
            with gr.Column():
                tts_button = gr.Button("音声合成", variant="primary")
                text_output = gr.Textbox(label="情報")
                audio_output = gr.Audio(label="結果")

        # Event wiring: synthesis, model selection, and model loading.
        tts_button.click(
            tts_fn,
            inputs=[
                model_name,
                model_path,
                text_input,
                language,
                sdp_ratio,
                noise_scale,
                noise_scale_w,
                length_scale,
                line_split,
                split_interval,
                speaker,
            ],
            outputs=[text_output, audio_output],
        )
        model_name.change(
            model_holder.update_model_files_for_gradio,
            inputs=[model_name],
            outputs=[model_path],
        )
        # Changing the model file disables synthesis until the model is reloaded.
        model_path.change(make_non_interactive, outputs=[tts_button])
        refresh_button.click(
            model_holder.update_model_names_for_gradio,
            outputs=[model_name, model_path, tts_button],
        )
        style = gr.Dropdown(label="スタイル", choices=[], visible=False)
        load_button.click(
            model_holder.get_model_for_gradio,
            inputs=[model_name, model_path],
            outputs=[style, tts_button, speaker],
        )

    return app


if __name__ == "__main__":
    import torch

    from style_bert_vits2.constants import Languages
    from style_bert_vits2.nlp import bert_models

    # Pre-load the Japanese BERT model and tokenizer used for inference.
    bert_models.load_model(Languages.JP)
    bert_models.load_tokenizer(Languages.JP)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model_holder = TTSModelHolder(Path("model_assets"), device)
    app = create_inference_app(model_holder)
    app.launch(inbrowser=True)