Ahsen Khaliq committed on
Commit
098bdf9
1 Parent(s): d882fc0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -3
app.py CHANGED
@@ -1,3 +1,5 @@
 
 
1
  import gradio as gr
2
 
3
  title = "FastSpeech2"
@@ -7,10 +9,47 @@ description = "Gradio Demo for fairseq S^2: A Scalable and Integrable Speech Syn
7
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2109.06912' target='_blank'>fairseq S^2: A Scalable and Integrable Speech Synthesis Toolkit</a> | <a href='https://github.com/pytorch/fairseq/tree/main/examples/speech_synthesis' target='_blank'>Github Repo</a></p>"
8
 
9
  examples = [
10
- ["Hello this is a test run"]
11
  ]
12
 
13
- gr.Interface.load("huggingface/facebook/fastspeech2-en-200_speaker-cv4", examples=examples,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  article=article,
15
  title=title,
16
- description=description).launch(enable_queue=True)
 
1
import os
import sys

# Pin the gradio version this Spaces demo was written against.
# NOTE(review): installing at import time is a Hugging Face Spaces workaround;
# invoke pip via the *running* interpreter (`sys.executable -m pip`) so the
# package lands in the same environment that will import it — a bare
# `os.system("pip install ...")` can target a different Python on PATH.
os.system(f"{sys.executable} -m pip install gradio==2.4.6")

import gradio as gr

# Demo title shown in the Gradio UI header.
title = "FastSpeech2"
 
9
# Footer shown under the demo: links to the paper and the reference code.
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2109.06912' target='_blank'>fairseq S^2: A Scalable and Integrable Speech Synthesis Toolkit</a> | <a href='https://github.com/pytorch/fairseq/tree/main/examples/speech_synthesis' target='_blank'>Github Repo</a></p>"

# One pre-filled example row: [input text, model-dropdown value].
examples = [
    ["Hello this is a test run", "fastspeech2-en-200_speaker-cv4"],
]
14
 
15
# Pre-load one gradio Interface per hosted facebook/fairseq S^2 checkpoint so
# `inference` can dispatch between them without reloading per request.
_MODEL_NAMES = (
    "fastspeech2-en-200_speaker-cv4",
    "tts_transformer-en-200_speaker-cv4",
    "tts_transformer-zh-cv7_css10",
    "tts_transformer-fr-cv7_css10",
    "tts_transformer-ru-cv7_css10",
    "tts_transformer-tr-cv7",
)
io1, io2, io3, io4, io5, io6 = (
    gr.Interface.load(f"huggingface/facebook/{name}") for name in _MODEL_NAMES
)
26
+
27
+
28
+
29
+
30
+
31
+
32
def inference(text, model):
    """Synthesize speech for ``text`` with the selected fairseq S^2 model.

    Parameters
    ----------
    text : str
        Input text to synthesize.
    model : str
        Model name selected in the dropdown. Any name not in the table
        falls back to ``io6`` (tts_transformer-tr-cv7), matching the
        original ``else`` branch.

    Returns
    -------
    Whatever the chosen loaded interface returns for ``text`` —
    presumably an audio output from the hosted inference API (not
    verifiable from this file alone).
    """
    # Dispatch table replaces the original 6-way if/elif chain; .get()
    # preserves the fallback-to-io6 behavior for unlisted model names.
    interfaces = {
        "fastspeech2-en-200_speaker-cv4": io1,
        "tts_transformer-en-200_speaker-cv4": io2,
        "tts_transformer-zh-cv7_css10": io3,
        "tts_transformer-fr-cv7_css10": io4,
        "tts_transformer-ru-cv7_css10": io5,
    }
    return interfaces.get(model, io6)(text)
46
+
47
+
48
# Build and launch the demo UI.
# BUG FIX: the original `choices` listed "tts_transformer-zh-cv7_css10" twice
# and omitted "tts_transformer-tr-cv7", so the tr model handled by `inference`
# was unreachable from the dropdown. The list below matches the branches in
# `inference` one-to-one.
gr.Interface(
    inference,
    [
        gr.inputs.Textbox(label="Input", lines=5),
        gr.inputs.Dropdown(
            choices=[
                "fastspeech2-en-200_speaker-cv4",
                "tts_transformer-en-200_speaker-cv4",
                "tts_transformer-zh-cv7_css10",
                "tts_transformer-fr-cv7_css10",
                "tts_transformer-ru-cv7_css10",
                "tts_transformer-tr-cv7",
            ],
            type="value",
            default="fastspeech2-en-200_speaker-cv4",
            label="model",
        ),
    ],
    examples=examples,
    article=article,
    title=title,
    description=description,
).launch(enable_queue=True, cache_examples=True)