lpw committed on
Commit
71a6b0b
1 Parent(s): 8ead8df

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -3
app.py CHANGED
@@ -1,13 +1,20 @@
1
  import os
2
  import gradio as gr
3
  import numpy as np
4
-
 
 
 
5
 
6
  io1 = gr.Interface.load("huggingface/facebook/xm_transformer_s2ut_en-hk", api_key=os.environ['api_key'])
7
  io2 = gr.Interface.load("huggingface/facebook/xm_transformer_s2ut_hk-en", api_key=os.environ['api_key'])
8
  io3 = gr.Interface.load("huggingface/facebook/xm_transformer_unity_en-hk", api_key=os.environ['api_key'])
9
  io4 = gr.Interface.load("huggingface/facebook/xm_transformer_unity_hk-en", api_key=os.environ['api_key'])
10
-
 
 
 
 
11
  def inference(audio, model):
12
  if model == "xm_transformer_s2ut_en-hk":
13
  out_audio = io1(audio)
@@ -15,6 +22,8 @@ def inference(audio, model):
15
  out_audio = io2(audio)
16
  elif model == "xm_transformer_unity_en-hk":
17
  out_audio = io3(audio)
 
 
18
  else:
19
  out_audio = io4(audio)
20
  return out_audio
@@ -128,7 +137,7 @@ with block:
128
  )
129
 
130
  btn = gr.Button("Submit")
131
- model = gr.Dropdown(choices=["xm_transformer_unity_en-hk", "xm_transformer_unity_hk-en", "xm_transformer_s2ut_en-hk", "xm_transformer_s2ut_hk-en"], value="xm_transformer_unity_en-hk",type="value", label="Model")
132
  out = gr.Audio(label="Output")
133
 
134
  btn.click(inference, inputs=[audio, model], outputs=out)
 
1
  import os
2
  import gradio as gr
3
  import numpy as np
4
+ from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub
5
+ from fairseq.models.speech_to_speech.hub_interface import S2SHubInterface
6
+ from fairseq.models.speech_to_text.hub_interface import S2THubInterface
7
+ from api_inference_community.docker_images.fairseq.app.pipelines.audio_to_audio import SpeechToSpeechPipeline
8
 
9
  io1 = gr.Interface.load("huggingface/facebook/xm_transformer_s2ut_en-hk", api_key=os.environ['api_key'])
10
  io2 = gr.Interface.load("huggingface/facebook/xm_transformer_s2ut_hk-en", api_key=os.environ['api_key'])
11
  io3 = gr.Interface.load("huggingface/facebook/xm_transformer_unity_en-hk", api_key=os.environ['api_key'])
12
  io4 = gr.Interface.load("huggingface/facebook/xm_transformer_unity_hk-en", api_key=os.environ['api_key'])
13
+
14
# Module-level cache: loading a fairseq speech-to-speech pipeline pulls the full
# model checkpoint, so build it once and reuse it across Gradio callbacks instead
# of reloading on every button click.
_PIPELINE_CACHE = {}


def call_model(audio, model):
    """Translate speech with a locally loaded fairseq pipeline (GPU path).

    Args:
        audio: Input audio in whatever form SpeechToSpeechPipeline accepts
            (passed through unchanged — presumably a filepath or waveform;
            TODO confirm against the pipeline's __call__ signature).
        model: Dropdown selection string. Currently unused: only the
            "xm_transformer_unity_hk-en_gpu" branch routes here, so the
            repo id is fixed below.

    Returns:
        The pipeline's output audio, returned as-is to the caller.
    """
    repo_id = "facebook/xm_transformer_unity_hk-en"
    pipe = _PIPELINE_CACHE.get(repo_id)
    if pipe is None:
        # First call pays the one-time model-load cost; later calls are cheap.
        pipe = SpeechToSpeechPipeline(repo_id)
        _PIPELINE_CACHE[repo_id] = pipe
    return pipe(audio)
17
+
18
  def inference(audio, model):
19
  if model == "xm_transformer_s2ut_en-hk":
20
  out_audio = io1(audio)
 
22
  out_audio = io2(audio)
23
  elif model == "xm_transformer_unity_en-hk":
24
  out_audio = io3(audio)
25
+ elif model == "xm_transformer_unity_hk-en_gpu":
26
+ out_audio = call_model(audio, model)
27
  else:
28
  out_audio = io4(audio)
29
  return out_audio
 
137
  )
138
 
139
  btn = gr.Button("Submit")
140
+ model = gr.Dropdown(choices=["xm_transformer_unity_en-hk", "xm_transformer_unity_hk-en", "xm_transformer_unity_hk-en_gpu", "xm_transformer_s2ut_en-hk", "xm_transformer_s2ut_hk-en"], value="xm_transformer_unity_en-hk",type="value", label="Model")
141
  out = gr.Audio(label="Output")
142
 
143
  btn.click(inference, inputs=[audio, model], outputs=out)