import os

# Pin the Gradio version this Space was written against before importing it.
os.system("pip install gradio==2.8.0b2")

import gradio as gr

title = "XM_Transformer"
description = "Gradio Demo for fairseq S2T: Fast Speech-to-Text Modeling with fairseq. To use it, simply add your audio, or click one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2010.05171' target='_blank'>fairseq S2T: Fast Speech-to-Text Modeling with fairseq</a> | <a href='https://github.com/pytorch/fairseq/tree/main/examples/speech_to_text' target='_blank'>Github Repo</a></p>"
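# Each example row pairs an audio clip with the model expected to translate it,
# matching the (audio, model) inputs defined further below.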
examples = [
["common_voice_es_en.flac","xm_transformer_600m-es_en-multi_domain"],
["common_voice_ru_18945535.flac","xm_transformer_600m-ru_en-multi_domain"],
["common_voice_fr_19731305.mp3","xm_transformer_600m-fr_en-multi_domain"],
["common_voice_en_ru.mp3","xm_transformer_600m-en_ru-multi_domain"]
]
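# Load a hosted inference interface from the Hugging Face Hub for each supported
# translation direction.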
io1 = gr.Interface.load("huggingface/facebook/xm_transformer_600m-es_en-multi_domain")
io2 = gr.Interface.load("huggingface/facebook/xm_transformer_600m-ru_en-multi_domain")
io3 = gr.Interface.load("huggingface/facebook/xm_transformer_600m-en_ru-multi_domain")
io4 = gr.Interface.load("huggingface/facebook/xm_transformer_600m-en_es-multi_domain")
io5 = gr.Interface.load("huggingface/facebook/xm_transformer_600m-en_zh-multi_domain")
io6 = gr.Interface.load("huggingface/facebook/xm_transformer_600m-fr_en-multi_domain")
io7 = gr.Interface.load("huggingface/facebook/xm_transformer_600m-en_ar-multi_domain")
io8 = gr.Interface.load("huggingface/facebook/xm_transformer_600m-en_tr-multi_domain")
# Map each dropdown choice to its loaded interface; any unrecognized name falls
# back to the en-tr model.
models = {
    "xm_transformer_600m-es_en-multi_domain": io1,
    "xm_transformer_600m-ru_en-multi_domain": io2,
    "xm_transformer_600m-en_ru-multi_domain": io3,
    "xm_transformer_600m-en_es-multi_domain": io4,
    "xm_transformer_600m-en_zh-multi_domain": io5,
    "xm_transformer_600m-fr_en-multi_domain": io6,
    "xm_transformer_600m-en_ar-multi_domain": io7,
    "xm_transformer_600m-en_tr-multi_domain": io8,
}

def inference(audio, model):
    # Forward the uploaded audio file to the interface for the selected model.
    return models.get(model, io8)(audio)
gr.Interface(
    inference,
    [
        gr.inputs.Audio(label="Input", type="filepath"),
        gr.inputs.Dropdown(choices=list(models), type="value",
                           default="xm_transformer_600m-es_en-multi_domain", label="model"),
    ],
    gr.outputs.Audio(label="Output"),
    article=article,
    title=title,
    examples=examples,
    description=description).launch(enable_queue=True, cache_examples=True)