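# Gradio Space: Tamazight text-to-speech with a Coqui TTS VITS model,
# with optional voice cloning from a reference recording.
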
import tempfile

import gradio as gr
import torch
from huggingface_hub import hf_hub_download
from TTS.api import TTS

# Run inference on the GPU when one is available.
CUDA = torch.cuda.is_available()
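
# Hugging Face Hub repo hosting the fine-tuned VITS checkpoint and its config.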
REPO_ID = "ayymen/Coqui-TTS-Vits-shi"

my_title = "Tamazight Text-to-Speech"
my_description = "This model is based on [VITS](https://github.com/jaywalnut310/vits), thanks to 🐸 [Coqui.ai](https://coqui.ai/)."
my_examples = [
    ["ⴰⵣⵓⵍ. ⵎⴰⵏⵣⴰⴽⵉⵏ?"],
    ["ⵡⴰ ⵜⴰⵎⵖⴰⵔⵜ ⵎⴰ ⴷ ⵓⴽⴰⵏ ⵜⵙⴽⵔⵜ?"],
    ["ⴳⵏ ⴰⴷ ⴰⴽ ⵉⵙⵙⴳⵏ ⵕⴱⴱⵉ ⵉⵜⵜⵓ ⴽ."],
    ["ⴰⵔⵔⴰⵡ ⵏ ⵍⵀⵎⵎ ⵢⵓⴽⵔ ⴰⵖ ⵉⵀⴷⵓⵎⵏ ⵏⵏⵖ!"],
]
my_inputs = [
    gr.Textbox(lines=5, label="Input Text", placeholder="The only available characters are: ⴰⴱⴳⴷⴹⴻⴼⴽⵀⵃⵄⵅⵇⵉⵊⵍⵎⵏⵓⵔⵕⵖⵙⵚⵛⵜⵟⵡⵢⵣⵥⵯ !,.:?"),
    gr.Audio(type="filepath", label="Speaker audio for voice cloning (optional)"),
    gr.Checkbox(label="Split Sentences (each sentence will be generated separately)", value=True),
]

my_outputs = gr.Audio(type="filepath", label="Output Audio", autoplay=True)
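
# Fetch the trained checkpoint and config from the Hub, then build the synthesizer.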
best_model_path = hf_hub_download(repo_id=REPO_ID, filename="best_model.pth")
config_path = hf_hub_download(repo_id=REPO_ID, filename="config.json")

api = TTS(model_path=best_model_path, config_path=config_path).to("cuda" if CUDA else "cpu")

# Load the voice conversion model used for optional voice cloning.
api.load_vc_model_by_name("voice_conversion_models/multilingual/vctk/freevc24", gpu=CUDA)
def tts(text: str, speaker_wav: str = None, split_sentences: bool = True):
    """Synthesize `text` and return the path to a temporary wav file.

    If `speaker_wav` is provided, its voice is cloned via voice conversion.
    """
    # Replace characters the model cannot synthesize with supported equivalents.
    text = text.replace("\n", ". ")
    text = text.replace("(", ",")
    text = text.replace(")", ",")
    text = text.replace(";", ",")
    text = text.replace("-", " ")

    # Write the audio to a temporary file that Gradio can serve back to the client.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
        if speaker_wav:
            api.tts_with_vc_to_file(text, speaker_wav=speaker_wav, file_path=fp.name, split_sentences=split_sentences)
        else:
            api.tts_to_file(text, file_path=fp.name, split_sentences=split_sentences)
        return fp.name
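
# Wire everything into a Gradio Interface; cache_examples pre-computes the example outputs.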
iface = gr.Interface(
    fn=tts,
    inputs=my_inputs,
    outputs=my_outputs,
    title=my_title,
    description=my_description,
    examples=my_examples,
    cache_examples=True,
)

iface.launch()