ahnafsamin committed
Commit 293d0d2
1 Parent(s): 81ee871

Update app.py

Files changed (1):
  app.py +8 -65
app.py CHANGED
@@ -5,94 +5,37 @@ import scipy.io.wavfile
 from espnet2.bin.tts_inference import Text2Speech
 from espnet2.utils.types import str_or_none
 
-tagen = 'kan-bayashi/ljspeech_vits'
-vocoder_tagen = "none"
+tagen = "https://huggingface.co/wietsedv/tacotron2-gronings/resolve/main/tts_ljspeech_finetune_tacotron2.v5_train.loss.ave.zip"
+vocoder_tagen = "parallel_wavegan/ljspeech_parallel_wavegan.v3"
 
 text2speechen = Text2Speech.from_pretrained(
     model_tag=str_or_none(tagen),
     vocoder_tag=str_or_none(vocoder_tagen),
     device="cpu",
-    # Only for Tacotron 2 & Transformer
     threshold=0.5,
-    # Only for Tacotron 2
     minlenratio=0.0,
     maxlenratio=10.0,
-    use_att_constraint=False,
+    use_att_constraint=True,
     backward_window=1,
-    forward_window=3,
-    # Only for FastSpeech & FastSpeech2 & VITS
-    speed_control_alpha=1.0,
-    # Only for VITS
-    noise_scale=0.333,
-    noise_scale_dur=0.333,
-)
-
-
-tagjp = 'kan-bayashi/jsut_full_band_vits_prosody'
-vocoder_tagjp = 'none'
-
-text2speechjp = Text2Speech.from_pretrained(
-    model_tag=str_or_none(tagjp),
-    vocoder_tag=str_or_none(vocoder_tagjp),
-    device="cpu",
-    # Only for Tacotron 2 & Transformer
-    threshold=0.5,
-    # Only for Tacotron 2
-    minlenratio=0.0,
-    maxlenratio=10.0,
-    use_att_constraint=False,
-    backward_window=1,
-    forward_window=3,
-    # Only for FastSpeech & FastSpeech2 & VITS
-    speed_control_alpha=1.0,
-    # Only for VITS
-    noise_scale=0.333,
-    noise_scale_dur=0.333,
-)
-
-tagch = 'kan-bayashi/csmsc_full_band_vits'
-vocoder_tagch = "none"
-
-text2speechch = Text2Speech.from_pretrained(
-    model_tag=str_or_none(tagch),
-    vocoder_tag=str_or_none(vocoder_tagch),
-    device="cpu",
-    # Only for Tacotron 2 & Transformer
-    threshold=0.5,
-    # Only for Tacotron 2
-    minlenratio=0.0,
-    maxlenratio=10.0,
-    use_att_constraint=False,
-    backward_window=1,
-    forward_window=3,
-    # Only for FastSpeech & FastSpeech2 & VITS
-    speed_control_alpha=1.0,
-    # Only for VITS
-    noise_scale=0.333,
-    noise_scale_dur=0.333,
+    forward_window=4,
 )
 
 def inference(text,lang):
     with torch.no_grad():
-        if lang == "english":
+        if lang == "gronings":
             wav = text2speechen(text)["wav"]
             scipy.io.wavfile.write("out.wav",text2speechen.fs , wav.view(-1).cpu().numpy())
-        if lang == "chinese":
-            wav = text2speechch(text)["wav"]
-            scipy.io.wavfile.write("out.wav",text2speechch.fs , wav.view(-1).cpu().numpy())
-        if lang == "japanese":
-            wav = text2speechjp(text)["wav"]
-            scipy.io.wavfile.write("out.wav",text2speechjp.fs , wav.view(-1).cpu().numpy())
+
     return "out.wav"
 title = "ESPnet2-TTS"
 description = "Gradio demo for ESPnet2-TTS: Extending the Edge of TTS Research. To use it, simply add your audio, or click one of the examples to load them. Read more at the links below."
 article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2110.07840' target='_blank'>ESPnet2-TTS: Extending the Edge of TTS Research</a> | <a href='https://github.com/espnet/espnet' target='_blank'>Github Repo</a></p>"
 
-examples=[['This paper describes ESPnet2-TTS, an end-to-end text-to-speech (E2E-TTS) toolkit. ESPnet2-TTS extends our earlier version, ESPnet-TTS, by adding many new features, including: on-the-fly flexible pre-processing, joint training with neural vocoders, and state-of-the-art TTS models with extensions like full-band E2E text-to-waveform modeling, which simplify the training pipeline and further enhance TTS performance. The unified design of our recipes enables users to quickly reproduce state-of-the-art E2E-TTS results',"english"],['レシピの統一された設計により、ユーザーは最先端のE2E-TTSの結果をすばやく再現できます。また、推論用の統合Pythonインターフェースで事前にトレーニングされたモデルを多数提供し、ユーザーがベースラインサンプルを生成してデモを構築するための迅速な手段を提供します。',"japanese"],['对英语和日语语料库的实验评估表明，我们提供的模型合成了与真实情况相当的话语，达到了最先进的水平',"chinese"]]
+examples=[['This paper describes ESPnet2-TTS, an end-to-end text-to-speech (E2E-TTS) toolkit. ESPnet2-TTS extends our earlier version, ESPnet-TTS, by adding many new features, including: on-the-fly flexible pre-processing, joint training with neural vocoders, and state-of-the-art TTS models with extensions like full-band E2E text-to-waveform modeling, which simplify the training pipeline and further enhance TTS performance. The unified design of our recipes enables users to quickly reproduce state-of-the-art E2E-TTS results',"english"]]
 
 gr.Interface(
     inference,
-    [gr.inputs.Textbox(label="input text",lines=10),gr.inputs.Radio(choices=["english", "chinese", "japanese"], type="value", default="english", label="language")],
+    [gr.inputs.Textbox(label="input text",lines=10),gr.inputs.Radio(choices=["gronings"], type="value", default="gronings", label="language")],
     gr.outputs.Audio(type="file", label="Output"),
     title=title,
     description=description,
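
For reviewers who want to sanity-check the new configuration outside the Space, below is a minimal smoke-test sketch built only from the settings this commit introduces. It assumes espnet2, torch, scipy, and the parallel_wavegan/espnet_model_zoo packages are installed; the input sentence is a placeholder, not text from the Space.

    # Smoke test for the model/vocoder pair introduced in this commit.
    # Placeholder input text; replace with a real Gronings sentence.
    import torch
    import scipy.io.wavfile
    from espnet2.bin.tts_inference import Text2Speech

    text2speech = Text2Speech.from_pretrained(
        # Tacotron 2 fine-tuned on Gronings + LJSpeech Parallel WaveGAN vocoder
        model_tag="https://huggingface.co/wietsedv/tacotron2-gronings/resolve/main/tts_ljspeech_finetune_tacotron2.v5_train.loss.ave.zip",
        vocoder_tag="parallel_wavegan/ljspeech_parallel_wavegan.v3",
        device="cpu",
        threshold=0.5,            # Tacotron 2 stop-token threshold
        minlenratio=0.0,
        maxlenratio=10.0,
        use_att_constraint=True,  # enabled in this commit
        backward_window=1,
        forward_window=4,         # widened from 3 in this commit
    )

    with torch.no_grad():
        wav = text2speech("placeholder input sentence")["wav"]
    scipy.io.wavfile.write("out.wav", text2speech.fs, wav.view(-1).cpu().numpy())

Note on the changed decoding flags: use_att_constraint=True with backward_window/forward_window restricts Tacotron 2's attention to move roughly monotonically within a small window, which tends to reduce skipped or repeated words on longer inputs; this only applies to attention-based models like Tacotron 2, which is presumably why the VITS-only options (speed_control_alpha, noise_scale, noise_scale_dur) were dropped.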