import gradio as gr
title = "fairseq S^2: A Scalable and Integrable Speech Synthesis Toolkit"
description = "Gradio Demo for fairseq S^2: A Scalable and Integrable Speech Synthesis Toolkit. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2109.06912' target='_blank'>fairseq S^2: A Scalable and Integrable Speech Synthesis Toolkit</a> | <a href='https://github.com/pytorch/fairseq/tree/main/examples/speech_synthesis' target='_blank'>Github Repo</a></p>"
examples = [
["Hello this is a test run","fastspeech2-en-200_speaker-cv4"],
["Hello, this is a test run.","tts_transformer-en-200_speaker-cv4"],
["Bonjour, ceci est un test.","tts_transformer-fr-cv7_css10"],
["BЗдравствуйте, это пробный запуск.","tts_transformer-ru-cv7_css10"],
["Merhaba, bu bir deneme çalışmasıdır.","tts_transformer-tr-cv7"],
["Xin chào, đây là một cuộc chạy thử nghiệm.","tts_transformer-vi-cv7"],
["مرحبًا ، هذا اختبار تشغيل.","tts_transformer-ar-cv7"],
["Hola, esta es una prueba.","tts_transformer-es-css10"]
]
# fairseq S^2 text-to-speech models hosted on the Hugging Face Hub,
# keyed by the model names used in the dropdown and in the examples above.
models = {
    "fastspeech2-en-200_speaker-cv4": gr.Interface.load("huggingface/facebook/fastspeech2-en-200_speaker-cv4"),
    "tts_transformer-en-200_speaker-cv4": gr.Interface.load("huggingface/facebook/tts_transformer-en-200_speaker-cv4"),
    "tts_transformer-fr-cv7_css10": gr.Interface.load("huggingface/facebook/tts_transformer-fr-cv7_css10"),
    "tts_transformer-ru-cv7_css10": gr.Interface.load("huggingface/facebook/tts_transformer-ru-cv7_css10"),
    "tts_transformer-tr-cv7": gr.Interface.load("huggingface/facebook/tts_transformer-tr-cv7"),
    "tts_transformer-vi-cv7": gr.Interface.load("huggingface/facebook/tts_transformer-vi-cv7"),
    "tts_transformer-ar-cv7": gr.Interface.load("huggingface/facebook/tts_transformer-ar-cv7"),
    "tts_transformer-es-css10": gr.Interface.load("huggingface/facebook/tts_transformer-es-css10"),
}

def inference(text, model):
    # Route the input text to the selected fairseq S^2 model and return the synthesized audio.
    return models[model](text)
gr.Interface(
    inference,
    [
        gr.inputs.Textbox(label="Input"),
        gr.inputs.Dropdown(
            choices=["fastspeech2-en-200_speaker-cv4", "tts_transformer-en-200_speaker-cv4", "tts_transformer-fr-cv7_css10", "tts_transformer-ru-cv7_css10", "tts_transformer-tr-cv7", "tts_transformer-vi-cv7", "tts_transformer-ar-cv7", "tts_transformer-es-css10"],
            type="value", default="fastspeech2-en-200_speaker-cv4", label="model"),
    ],
    gr.outputs.Audio(label="Output"),
    examples=examples,
    article=article,
    title=title,
    description=description).launch(enable_queue=True)