Ahsen Khaliq committed on
Commit
ebe1109
1 Parent(s): 6f53eb2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -79,7 +79,7 @@ def inference(text,lang):
79
  scipy.io.wavfile.write("out.wav",text2speechen.fs , wav.view(-1).cpu().numpy())
80
  if lang == "chinese":
81
  wav = text2speechch(text)["wav"]
82
- scipy.io.wavfile.write("out.wav",text2speechench.fs , wav.view(-1).cpu().numpy())
83
  if lang == "japanese":
84
  wav = text2speechjp(text)["wav"]
85
  scipy.io.wavfile.write("out.wav",text2speechjp.fs , wav.view(-1).cpu().numpy())
@@ -88,7 +88,7 @@ title = "ESPnet2-TTS"
88
  description = "Gradio demo for ESPnet2-TTS: Extending the Edge of TTS Research. To use it, simply add your audio, or click one of the examples to load them. Read more at the links below."
89
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2110.07840' target='_blank'>ESPnet2-TTS: Extending the Edge of TTS Research</a> | <a href='https://github.com/espnet/espnet' target='_blank'>Github Repo</a></p>"
90
 
91
- examples=[['This paper describes ESPnet2-TTS, an end-to-end text-to-speech (E2E-TTS) toolkit. ESPnet2-TTS extends our earlier version, ESPnet-TTS, by adding many new features, including: on-the-fly flexible pre-processing, joint training with neural vocoders, and state-of-the-art TTS models with extensions like full-band E2E text-to-waveform modeling, which simplify the training pipeline and further enhance TTS performance. The unified design of our recipes enables users to quickly reproduce state-of-the-art E2E-TTS results',"english"],['水をマレーシアから買わなくてはならないのです。',"japanese"],['对英语和日语语料库的实验评估表明,我们提供的模型合成了与真实情况相当的话语,实现了最先进的 TTS 性能',"chinese"]]
92
 
93
  gr.Interface(
94
  inference,
 
79
  scipy.io.wavfile.write("out.wav",text2speechen.fs , wav.view(-1).cpu().numpy())
80
  if lang == "chinese":
81
  wav = text2speechch(text)["wav"]
82
+ scipy.io.wavfile.write("out.wav",text2speechch.fs , wav.view(-1).cpu().numpy())
83
  if lang == "japanese":
84
  wav = text2speechjp(text)["wav"]
85
  scipy.io.wavfile.write("out.wav",text2speechjp.fs , wav.view(-1).cpu().numpy())
 
88
  description = "Gradio demo for ESPnet2-TTS: Extending the Edge of TTS Research. To use it, simply add your audio, or click one of the examples to load them. Read more at the links below."
89
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2110.07840' target='_blank'>ESPnet2-TTS: Extending the Edge of TTS Research</a> | <a href='https://github.com/espnet/espnet' target='_blank'>Github Repo</a></p>"
90
 
91
+ examples=[['This paper describes ESPnet2-TTS, an end-to-end text-to-speech (E2E-TTS) toolkit. ESPnet2-TTS extends our earlier version, ESPnet-TTS, by adding many new features, including: on-the-fly flexible pre-processing, joint training with neural vocoders, and state-of-the-art TTS models with extensions like full-band E2E text-to-waveform modeling, which simplify the training pipeline and further enhance TTS performance. The unified design of our recipes enables users to quickly reproduce state-of-the-art E2E-TTS results',"english"],['水をマレーシアから買わなくてはならないのです。',"japanese"],['对英语和日语语料库的实验评估表明,我们提供的模型合成了与真实情况相当的话语,达到了最先进的水平',"chinese"]]
92
 
93
  gr.Interface(
94
  inference,