|
import argparse |
|
import datetime |
|
import json |
|
import os |
|
import sys |
|
from typing import Optional |
|
|
|
import gradio as gr |
|
import torch |
|
import yaml |
|
|
|
from common.constants import ( |
|
DEFAULT_ASSIST_TEXT_WEIGHT, |
|
DEFAULT_LENGTH, |
|
DEFAULT_LINE_SPLIT, |
|
DEFAULT_NOISE, |
|
DEFAULT_NOISEW, |
|
DEFAULT_SDP_RATIO, |
|
DEFAULT_SPLIT_INTERVAL, |
|
DEFAULT_STYLE, |
|
DEFAULT_STYLE_WEIGHT, |
|
Languages, |
|
) |
|
from common.log import logger |
|
from common.tts_model import ModelHolder |
|
from infer import InvalidToneError |
|
from text.japanese import g2kata_tone, kata_tone2phone_tone, text_normalize |
|
|
|
# True when running on Hugging Face Spaces (the platform sets SYSTEM=spaces).
is_hf_spaces = os.getenv("SYSTEM") == "spaces"
# Max input length (characters) enforced on HF Spaces for JP/ZH text;
# other languages are allowed 8x this amount (see tts_fn).
limit = 100


# Load path settings from configs/paths.yml (e.g. where model assets live).
with open(os.path.join("configs", "paths.yml"), "r", encoding="utf-8") as f:
    path_config: dict[str, str] = yaml.safe_load(f.read())

# Root directory holding the model assets; also the default for --dir.
assets_root = path_config["assets_root"]

# Language codes declared in the Languages enum (from common.constants).
languages = [l.value for l in Languages]
|
|
|
|
|
def tts_fn(
    model_name,
    model_path,
    text,
    language,
    reference_audio_path,
    sdp_ratio,
    noise_scale,
    noise_scale_w,
    length_scale,
    line_split,
    split_interval,
    assist_text,
    assist_text_weight,
    use_assist_text,
    style,
    style_weight,
    kata_tone_json_str,
    use_tone,
    speaker,
):
    """Synthesize speech for `text` with the selected model and settings.

    Returns a 3-tuple for the Gradio outputs:
        (status message, (sample_rate, audio) or None, kata/tone JSON string)
    """
    # On HF Spaces, cap the input length: `limit` chars for JP/ZH,
    # 8x that for other languages (EN etc.).
    # NOTE: the over-limit message previously had a missing ")" in the
    # non-JP/ZH branch; the single deduplicated message below fixes that.
    max_chars = limit if language in ("JP", "ZH") else limit * 8
    if is_hf_spaces and len(text) > max_chars:
        logger.error(f"Text is too long: {len(text)}")
        return (
            f"Error: 文字数が多すぎます({max_chars}文字以下にしてください)/Text is too long (Please keep it under {max_chars} characters)",
            None,
            kata_tone_json_str,
        )
    # Ensure the requested model is loaded (no-op if already current).
    model_holder.load_model_gr(model_name, model_path)

    wrong_tone_message = ""
    kata_tone: Optional[list[tuple[str, int]]] = None
    if use_tone and kata_tone_json_str != "":
        # Manual accent adjustment only works for Japanese, without line split.
        if language != "JP":
            logger.warning("Only Japanese is supported for tone generation.")
            wrong_tone_message = "アクセント指定は現在日本語のみ対応しています。/Only Japanese is supported for tone generation."
        if line_split:
            logger.warning("Tone generation is not supported for line split.")
            wrong_tone_message = (
                "アクセント指定は改行で分けて生成を使わない場合のみ対応しています。/Tone generation is not supported for line split."
            )
        try:
            kata_tone = []
            # Expected JSON shape: a list of [kana, tone] pairs, tone in {0, 1}.
            json_data = json.loads(kata_tone_json_str)
            for kana, tone in json_data:
                assert isinstance(kana, str) and tone in (0, 1), f"{kana}, {tone}"
                kata_tone.append((kana, tone))
        except Exception as e:
            # Invalid user-edited JSON: warn and fall back to automatic tones.
            logger.warning(f"Error occurred when parsing kana_tone_json: {e}")
            wrong_tone_message = f"アクセント指定が不正です/Invalid accent specification: {e}"
            kata_tone = None

    # Convert kana-level tones to the phone-level tones the model expects.
    tone: Optional[list[int]] = None
    if kata_tone is not None:
        phone_tone = kata_tone2phone_tone(kata_tone)
        tone = [t for _, t in phone_tone]

    speaker_id = model_holder.current_model.spk2id[speaker]

    start_time = datetime.datetime.now()

    try:
        sr, audio = model_holder.current_model.infer(
            text=text,
            language=language,
            reference_audio_path=reference_audio_path,
            sdp_ratio=sdp_ratio,
            noise=noise_scale,
            noisew=noise_scale_w,
            length=length_scale,
            line_split=line_split,
            split_interval=split_interval,
            assist_text=assist_text,
            assist_text_weight=assist_text_weight,
            use_assist_text=use_assist_text,
            style=style,
            style_weight=style_weight,
            given_tone=tone,
            sid=speaker_id,
        )
    except InvalidToneError as e:
        logger.error(f"Tone error: {e}")
        return f"Error: アクセント指定が不正です/Invalid accent specification:\n{e}", None, kata_tone_json_str
    except ValueError as e:
        logger.error(f"Value error: {e}")
        return f"Error: {e}", None, kata_tone_json_str

    end_time = datetime.datetime.now()
    duration = (end_time - start_time).total_seconds()

    if tone is None and language == "JP":
        # Echo the automatically generated accents back to the UI so the
        # user can tweak them and re-synthesize.
        norm_text = text_normalize(text)
        kata_tone = g2kata_tone(norm_text)
        kata_tone_json_str = json.dumps(kata_tone, ensure_ascii=False)
    elif tone is None:
        kata_tone_json_str = ""
    message = f"Success, time: {duration} seconds."
    if wrong_tone_message != "":
        message = wrong_tone_message + "\n" + message
    return message, (sr, audio), kata_tone_json_str
|
|
|
|
|
# Default text pre-filled in the input box when the demo loads.
initial_text = "かわいいって言ってくれてうれしいな。あなたと一緒にいると落ち着くな。ずっと一緒にいようね。約束だよ。"

# (text, language) example pairs shown via gr.Examples on HF Spaces.
example_hf_spaces = [
    [initial_text, "JP"],
    ["あなたのことが好きだよ!だーいすき!私と付き合ってくれないかな。だめかな?きっと幸せだって言わせてみせるから!", "JP"],
    ["機械学習を用いた音声合成により、自然な発声が可能になっています。音素の持続時間などの発声の特徴を解析します。これにより、話者による微妙な表現の違いも再現することができます。", "JP"],
    ["今日もお疲れさま。お仕事頑張れてえらいね。頑張ったあなたにはご褒美をあげないとだよね。あなたの好きな耳かきをしてあげる。それじゃあ、そこに横になってね。", "JP"],
    ["I love you more than anything in the world. Let's stay together from now on. I will definitely make you happy. I'll cook you delicious food every day. I love you. Do you like me?", "EN"],
    ["我爱你胜过世界上的一切。 我们会永远在一起 我一定会让你幸福。 我会每天给你做好吃的。 我爱你 你喜欢我吗?", "ZH"]
]
|
# Markdown rendered at the top of the demo page (Japanese first, then English).
# This is user-facing content — do not edit without reviewing both languages.
initial_md = """
# Style-Bert-VITS2 Rikka Botan Model デモ
入力テキストの意味に応じて感情豊かな読み上げを生成できます。
学習データは開発者の声(子供っぽいおっとりした声質)を利用しています。
言語は日本語(JP)、英語(EN)、中国語(簡体字)(ZH)に対応しています。(同一話者によるマルチリンガルモデルは稀です)
入力上限文字数は100文字(英語は800文字)までにしています。(お試しデモ用)
このSpaceではCPUで動作させているため、生成に時間がかかります。
モデルのページのコードをGPU環境で使用することをお勧めします。(google colabを用いれば簡単にできます)
もしもっと長いものを生成したい場合は、公開しているモデル([Rikka Botan](https://huggingface.co/RikkaBotan))をご使用ください(無料)。
このモデルは無料で商用利用も可能であり、R-18,R-18G作品への利用も可能です。
基本的に、他者へ悪意を持って危害・損害を与える行為や違法な行為以外であれば、自由に使用していただいて問題ありません。
(詳しくはモデルページの記載をご覧ください。)

It can generate emotional readings based on the meaning of the input text.
The training data uses the voices of developers.
The maximum number of characters that can be entered is 100 characters(In English, 800). (For trial demo)
This Space runs on the CPU, so it takes time to generate.
We recommend using the code on the model's page in a GPU environment.
If you want to generate something longer, please use the published models ([Rikka Botan](https://huggingface.co/RikkaBotan))(free).
This model is free for commercial use and can be used in R-18 and R-18G works.
Basically, you are free to use this model as long as you do not maliciously harm or damage others or engage in illegal activities.
(For more details, please see the description on the model page.)
"""

# Help text shown inside the "Details of styles" accordion.
# f-string: interpolates the DEFAULT_STYLE preset name at import time.
style_md = f"""
- プリセットまたは音声ファイルから読み上げの声音・感情・スタイルのようなものを制御できます。
- デフォルトの{DEFAULT_STYLE}でも、十分に読み上げる文に応じた感情で感情豊かに読み上げられます。このスタイル制御は、それを重み付きで上書きするような感じです。
- 強さを大きくしすぎると発音が変になったり声にならなかったりと崩壊することがあります。
- どのくらいに強さがいいかはモデルやスタイルによって異なるようです。
- 音声ファイルを入力する場合は、学習データと似た声音の話者(特に同じ性別)でないとよい効果が出ないかもしれません。

- You can control things like reading voice, emotion, and style from presets or audio files.
- Even with the default {DEFAULT_STYLE}, the text will be read out in a rich and emotional way, with the emotion appropriate to the sentence being read. This style control is like overriding that with weights.
- If you increase the strength too much, your pronunciation may become strange or you may not be able to hear it properly.
- The desired strength seems to vary depending on the model and style.
- When inputting an audio file, good results may not be obtained unless the speaker has a voice similar to that of the training data (especially the same gender)
"""
|
|
|
|
|
def make_interactive():
    """Enable the synthesis button and restore its ready-to-use label."""
    return gr.update(
        interactive=True,
        value="音声合成/Speech synthesis",
    )
|
|
|
|
|
def make_non_interactive():
    """Disable the synthesis button and ask the user to load a model first."""
    return gr.update(
        interactive=False,
        value="音声合成(モデルをロードしてください)/Speech synthesis(Please load the model)",
    )
|
|
|
|
|
def gr_util(item):
    """Toggle between the preset-style dropdown and the reference-audio input.

    Returns updates for (style dropdown, reference audio) visibility.
    """
    preset_selected = item == "プリセットから選ぶ/Select from preset"
    if not preset_selected:
        # Audio-file mode: hide presets, show the audio input.
        return (gr.update(visible=False), gr.update(visible=True))
    # Preset mode: also clear any previously uploaded reference audio.
    return (gr.update(visible=True), gr.Audio(visible=False, value=None))
|
|
|
|
|
if __name__ == "__main__":
    # --- Command-line options ------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument("--cpu", action="store_true", help="Use CPU instead of GPU")
    parser.add_argument(
        "--dir", "-d", type=str, help="Model directory", default=assets_root
    )
    parser.add_argument(
        "--share", action="store_true", help="Share this app publicly", default=False
    )
    parser.add_argument(
        "--server-name",
        type=str,
        default=None,
        help="Server name for Gradio app",
    )
    parser.add_argument(
        "--no-autolaunch",
        action="store_true",
        default=False,
        help="Do not launch app automatically",
    )
    args = parser.parse_args()
    model_dir = args.dir

    # Prefer CUDA when available unless --cpu was explicitly requested.
    if args.cpu:
        device = "cpu"
    else:
        device = "cuda" if torch.cuda.is_available() else "cpu"

    # Holds the models discovered under model_dir; models are loaded on demand
    # via load_model_gr (see tts_fn and the load button wiring below).
    model_holder = ModelHolder(model_dir, device)

    model_names = model_holder.model_names
    if len(model_names) == 0:
        logger.error(
            f"モデルが見つかりませんでした。{model_dir}にモデルを置いてください。/ Not found models. Please put models in {model_dir}"
        )
        sys.exit(1)
    # The first discovered model is pre-selected in the UI dropdowns.
    initial_id = 0
    initial_pth_files = model_holder.model_files_dict[model_names[initial_id]]
|
with gr.Blocks(theme="NoCrypt/miku") as app: |
|
gr.Markdown(initial_md) |
|
with gr.Row(): |
|
with gr.Column(): |
|
with gr.Row(): |
|
with gr.Column(scale=3): |
|
model_name = gr.Dropdown( |
|
label="モデル一覧/models", |
|
choices=model_names, |
|
value=model_names[initial_id], |
|
) |
|
model_path = gr.Dropdown( |
|
label="モデルファイル/model file", |
|
choices=initial_pth_files, |
|
value=initial_pth_files[0], |
|
) |
|
refresh_button = gr.Button("更新/update", scale=1, visible=False) |
|
load_button = gr.Button("ロード/load", scale=1, variant="primary") |
|
text_input = gr.TextArea(label="テキスト/text", value=initial_text) |
|
|
|
line_split = gr.Checkbox( |
|
label="改行で分けて生成(分けたほうが感情が乗ります)/Generate separated by line breaks (separating them gives more emotionally)", |
|
value=DEFAULT_LINE_SPLIT, |
|
) |
|
split_interval = gr.Slider( |
|
minimum=0.0, |
|
maximum=2, |
|
value=DEFAULT_SPLIT_INTERVAL, |
|
step=0.1, |
|
label="改行ごとに挟む無音の長さ(秒)/Silence time length per separations", |
|
) |
|
line_split.change( |
|
lambda x: (gr.Slider(visible=x)), |
|
inputs=[line_split], |
|
outputs=[split_interval], |
|
) |
|
tone = gr.Textbox( |
|
label="アクセント調整(数値は 0=低 か1=高 のみ)/Accent adjustment(0 or 1)", |
|
info="改行で分けない場合のみ使えます。万能ではありません。/ Only when not using separations", |
|
) |
|
use_tone = gr.Checkbox(label="アクセント調整を使う/Use accent adjustment", value=False) |
|
use_tone.change( |
|
lambda x: (gr.Checkbox(value=False) if x else gr.Checkbox()), |
|
inputs=[use_tone], |
|
outputs=[line_split], |
|
) |
|
language = gr.Dropdown(choices=["JP","EN","ZH"], value=["JP","EN","ZH"], label="Language") |
|
speaker = gr.Dropdown(label="話者/Speaker") |
|
with gr.Accordion(label="詳細設定/details", open=False): |
|
sdp_ratio = gr.Slider( |
|
minimum=0, |
|
maximum=1, |
|
value=DEFAULT_SDP_RATIO, |
|
step=0.1, |
|
label="SDP Ratio", |
|
) |
|
noise_scale = gr.Slider( |
|
minimum=0.1, |
|
maximum=2, |
|
value=DEFAULT_NOISE, |
|
step=0.1, |
|
label="Noise", |
|
) |
|
noise_scale_w = gr.Slider( |
|
minimum=0.1, |
|
maximum=2, |
|
value=DEFAULT_NOISEW, |
|
step=0.1, |
|
label="Noise_W", |
|
) |
|
length_scale = gr.Slider( |
|
minimum=0.1, |
|
maximum=2, |
|
value=DEFAULT_LENGTH, |
|
step=0.1, |
|
label="Length", |
|
) |
|
use_assist_text = gr.Checkbox( |
|
label="Assist textを使う", value=False |
|
) |
|
assist_text = gr.Textbox( |
|
label="Assist text", |
|
placeholder="やったー!嬉しいな!好きだよっ!", |
|
info="このテキストの読み上げと似た声音・感情になりやすくなります。ただ抑揚やテンポ等が犠牲になる傾向があります。/This will likely result in a similar voice and emotion as when reading this text. However, there is a tendency for things like intonation and tempo to be sacrificed.", |
|
visible=False, |
|
) |
|
assist_text_weight = gr.Slider( |
|
minimum=0, |
|
maximum=1, |
|
value=DEFAULT_ASSIST_TEXT_WEIGHT, |
|
step=0.1, |
|
label="Assist textの強さ/Assist text power", |
|
visible=False, |
|
) |
|
use_assist_text.change( |
|
lambda x: (gr.Textbox(visible=x), gr.Slider(visible=x)), |
|
inputs=[use_assist_text], |
|
outputs=[assist_text, assist_text_weight], |
|
) |
|
with gr.Column(): |
|
with gr.Accordion("スタイルについて詳細/Details of styles", open=False): |
|
gr.Markdown(style_md) |
|
style_mode = gr.Radio( |
|
["プリセットから選ぶ/Select from preset", "音声ファイルを入力/Put in audio files"], |
|
label="スタイルの指定方法/How to select styles", |
|
value="プリセットから選ぶ/Select from preset", |
|
) |
|
style = gr.Dropdown( |
|
label=f"スタイル({DEFAULT_STYLE}が平均スタイル/Style({DEFAULT_STYLE} is standard))", |
|
choices=["モデルをロードしてください/Please load the model"], |
|
value="モデルをロードしてください/Please load the model", |
|
) |
|
style_weight = gr.Slider( |
|
minimum=0, |
|
maximum=50, |
|
value=DEFAULT_STYLE_WEIGHT, |
|
step=0.1, |
|
label="スタイルの強さ/Strength of style", |
|
) |
|
ref_audio_path = gr.Audio( |
|
label="参照音声/Reference audio", type="filepath", visible=False |
|
) |
|
tts_button = gr.Button( |
|
"音声合成(モデルをロードしてください)/Speech synthesis(Please load the model)", |
|
variant="primary", |
|
interactive=False, |
|
) |
|
text_output = gr.Textbox(label="情報/Information") |
|
audio_output = gr.Audio(label="結果/Result") |
|
with gr.Accordion("テキスト例/Sample text", open=True): |
|
gr.Examples(example_hf_spaces, inputs=[text_input, language]) |
|
|
|
tts_button.click( |
|
tts_fn, |
|
inputs=[ |
|
model_name, |
|
model_path, |
|
text_input, |
|
language, |
|
ref_audio_path, |
|
sdp_ratio, |
|
noise_scale, |
|
noise_scale_w, |
|
length_scale, |
|
line_split, |
|
split_interval, |
|
assist_text, |
|
assist_text_weight, |
|
use_assist_text, |
|
style, |
|
style_weight, |
|
tone, |
|
use_tone, |
|
speaker, |
|
], |
|
outputs=[text_output, audio_output, tone], |
|
) |
|
|
|
model_name.change( |
|
model_holder.update_model_files_gr, |
|
inputs=[model_name], |
|
outputs=[model_path], |
|
) |
|
|
|
model_path.change(make_non_interactive, outputs=[tts_button]) |
|
|
|
refresh_button.click( |
|
model_holder.update_model_names_gr, |
|
outputs=[model_name, model_path, tts_button], |
|
) |
|
|
|
load_button.click( |
|
model_holder.load_model_gr, |
|
inputs=[model_name, model_path], |
|
outputs=[style, tts_button, speaker], |
|
) |
|
|
|
style_mode.change( |
|
gr_util, |
|
inputs=[style_mode], |
|
outputs=[style, ref_audio_path], |
|
) |
|
|
|
app.launch( |
|
inbrowser=not args.no_autolaunch, share=args.share, server_name=args.server_name |
|
) |
|
|