Ahsen Khaliq committed
Commit
cf5f5dd
1 Parent(s): 746626a

Create app.py

Files changed (1)
  1. app.py +35 -0
app.py ADDED
@@ -0,0 +1,35 @@
+import gradio as gr
+
+title = "FastSpeech2"
+
+description = "Gradio Demo for fairseq S^2: A Scalable and Integrable Speech Synthesis Toolkit. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
+
+article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2109.06912' target='_blank'>fairseq S^2: A Scalable and Integrable Speech Synthesis Toolkit</a> | <a href='https://github.com/pytorch/fairseq/tree/main/examples/speech_synthesis' target='_blank'>Github Repo</a></p>"
+
+examples = [
+    ["Hello, this is a test run.", "fastspeech2-en-200_speaker-cv4"]
+]
+
+# Load both FastSpeech2 text-to-speech models from the Hugging Face model hub.
+io1 = gr.Interface.load("huggingface/facebook/fastspeech2-en-200_speaker-cv4")
+io2 = gr.Interface.load("huggingface/facebook/fastspeech2-en-ljspeech")
+
+
+def inference(text, model):
+    # Route the request to the selected model and return the synthesized audio.
+    if model == "fastspeech2-en-200_speaker-cv4":
+        audio = io1(text)
+    else:
+        audio = io2(text)
+    return audio
+
+gr.Interface(
+    inference,
+    [gr.inputs.Textbox(label="Input", lines=10),
+     gr.inputs.Dropdown(choices=["fastspeech2-en-200_speaker-cv4", "fastspeech2-en-ljspeech"], type="value", default="fastspeech2-en-200_speaker-cv4", label="model")],
+    gr.outputs.Audio(label="Output"),
+    examples=examples,
+    article=article,
+    title=title,
+    description=description,
+).launch(enable_queue=True, cache_examples=True)
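
For a quick sanity check outside the web UI, the loaded interfaces can be called directly, mirroring what inference() does above. This is a minimal sketch assuming the gradio 2.x API used in this file, where Interface.load(...) returns a callable that yields a path to the synthesized audio:

import gradio as gr

# Assumption: gradio 2.x behavior, as targeted by app.py above.
tts = gr.Interface.load("huggingface/facebook/fastspeech2-en-200_speaker-cv4")
audio_path = tts("Hello, this is a test run.")  # same example text as in app.py
print(audio_path)  # path to the generated audio file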