import gradio as gr

from infer_onnx import TTS
from ruaccent import RUAccent

# ONNX VITS models (Hugging Face repo ids) offered in the interface
model_names = ["TeraTTS/natasha-g2p-vits", "TeraTTS/glados2-g2p-vits"]
models = {name: TTS(name) for name in model_names}

# RUAccent restores word stress and the letter "ё", which the g2p models expect
accentizer = RUAccent(workdir="./model/ruaccent")
accentizer.load(omograph_model_size='medium', dict_load_startup=True)


def process_text(text: str) -> str:
    # Put stress marks and restore "ё" in the input text before synthesis
    return accentizer.process_all(text)


def text_to_speech(model_name, text, prep_text):
    # Optionally preprocess the text, synthesize it with the selected model,
    # and write the result to a temporary WAV file
    if prep_text:
        text = process_text(text)
    audio = models[model_name](text)
    models[model_name].save_wav(audio, 'temp.wav')
    return 'temp.wav', f"Processed text: '{text}'"
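

# --- Gradio UI: input components ---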
model_choice = gr.Dropdown(choices=list(models.keys()), value="TeraTTS/natasha-g2p-vits", label="Select a model")
input_text = gr.Textbox(label="Enter text to synthesize")
prep_text = gr.Checkbox(label="Preprocess", info="Preprocess the text? (stress marks, ё)", value=True)
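
# Output components: the generated audio and the text that was actually synthesized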
output_audio = gr.Audio(label="Audio", type="filepath")  # text_to_speech returns a path to a WAV file
output_text = gr.Textbox(label="Processed text")
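
# Wire the components to text_to_speech and launch the app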
iface = gr.Interface(fn=text_to_speech, inputs=[model_choice, input_text, prep_text], outputs=[output_audio, output_text])
iface.launch()