ahnafsamin committed on
Commit
f6fdf7a
1 Parent(s): ee131ef

Update app.py

Files changed (1)
  1. app.py +90 -28
app.py CHANGED
@@ -1,45 +1,107 @@
 import gradio as gr
 import time
+import urllib.request
+from pathlib import Path
+import os
 import torch
 import scipy.io.wavfile
 from espnet2.bin.tts_inference import Text2Speech
 from espnet2.utils.types import str_or_none
 
-tagen = "https://huggingface.co/wietsedv/tacotron2-gronings/resolve/main/tts_ljspeech_finetune_tacotron2.v5_train.loss.ave.zip"
-vocoder_tagen = "parallel_wavegan/ljspeech_parallel_wavegan.v3"
-
-text2speechen = Text2Speech.from_pretrained(
-    model_tag=tagen,
-    vocoder_tag=vocoder_tagen,
+
+# def load_model(model_tag, vocoder_tag):
+#     from espnet_model_zoo.downloader import ModelDownloader
+
+#     kwargs = {}
+
+#     # Model
+#     d = ModelDownloader()
+#     kwargs = d.download_and_unpack(model_tag)
+
+#     # Vocoder
+#     download_dir = Path(os.path.expanduser("~/.cache/parallel_wavegan"))
+#     vocoder_dir = download_dir / vocoder_tag
+#     os.makedirs(vocoder_dir, exist_ok=True)
+
+#     kwargs["vocoder_config"] = vocoder_dir / "config.yml"
+#     if not kwargs["vocoder_config"].exists():
+#         urllib.request.urlretrieve(f"https://huggingface.co/{vocoder_tag}/resolve/main/config.yml", kwargs["vocoder_config"])
+
+#     kwargs["vocoder_file"] = vocoder_dir / "checkpoint-50000steps.pkl"
+#     if not kwargs["vocoder_file"].exists():
+#         urllib.request.urlretrieve(f"https://huggingface.co/{vocoder_tag}/resolve/main/checkpoint-50000steps.pkl", kwargs["vocoder_file"])
+
+#     return Text2Speech(
+#         **kwargs,
+#         device="cpu",
+#         threshold=0.5,
+#         minlenratio=0.0,
+#         maxlenratio=10.0,
+#         use_att_constraint=True,
+#         backward_window=1,
+#         forward_window=4,
+#     )
+
+# gos_text2speech = load_model('https://huggingface.co/wietsedv/tacotron2-gronings/resolve/main/tts_ljspeech_finetune_tacotron2.v5_train.loss.ave.zip', 'wietsedv/parallelwavegan-gronings')
+# nld_text2speech = load_model('https://huggingface.co/wietsedv/tacotron2-dutch/resolve/main/tts_ljspeech_finetune_tacotron2.v5_train.loss.ave.zip', 'wietsedv/parallelwavegan-dutch')
+
+gos_text2speech = Text2Speech.from_pretrained(
+    model_tag="https://huggingface.co/wietsedv/tacotron2-gronings/resolve/main/tts_ljspeech_finetune_tacotron2.v5_train.loss.ave.zip",
+    vocoder_tag="parallel_wavegan/ljspeech_parallel_wavegan.v3",
     device="cpu",
     threshold=0.5,
     minlenratio=0.0,
     maxlenratio=10.0,
     use_att_constraint=True,
     backward_window=1,
     forward_window=4,
 )
+nld_text2speech = Text2Speech.from_pretrained(
+    model_tag="https://huggingface.co/wietsedv/tacotron2-dutch/resolve/main/tts_ljspeech_finetune_tacotron2.v5_train.loss.ave.zip",
+    vocoder_tag="parallel_wavegan/ljspeech_parallel_wavegan.v3",
+    device="cpu",
+    threshold=0.5,
+    minlenratio=0.0,
+    maxlenratio=10.0,
+    use_att_constraint=True,
+    backward_window=1,
+    forward_window=4,
+)
+#eng_text2speech = Text2Speech.from_pretrained(
+#    model_tag="kan-bayashi/ljspeech_tacotron2",
+#    vocoder_tag="parallel_wavegan/ljspeech_parallel_wavegan.v3",
+#    device="cpu",
+#    threshold=0.5,
+#    minlenratio=0.0,
+#    maxlenratio=10.0,
+#    use_att_constraint=True,
+#    backward_window=1,
+#    forward_window=4,
+#)
 
 def inference(text,lang):
     with torch.no_grad():
         if lang == "gronings":
-            wav = text2speechen(text)["wav"]
-            scipy.io.wavfile.write("out.wav",text2speechen.fs , wav.view(-1).cpu().numpy())
-
-    return "out.wav"
-title = "ESPnet2-TTS"
-description = "Gradio demo for ESPnet2-TTS: Extending the Edge of TTS Research. To use it, simply add your audio, or click one of the examples to load them. Read more at the links below."
-article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2110.07840' target='_blank'>ESPnet2-TTS: Extending the Edge of TTS Research</a> | <a href='https://github.com/espnet/espnet' target='_blank'>Github Repo</a></p>"
+            wav = gos_text2speech(text)["wav"]
+            scipy.io.wavfile.write("out.wav", gos_text2speech.fs , wav.view(-1).cpu().numpy())
+        if lang == "dutch":
+            wav = nld_text2speech(text)["wav"]
+            scipy.io.wavfile.write("out.wav", nld_text2speech.fs , wav.view(-1).cpu().numpy())
+        #if lang == "english":
+        #    wav = eng_text2speech(text)["wav"]
+        #    scipy.io.wavfile.write("out.wav", eng_text2speech.fs , wav.view(-1).cpu().numpy())
+
+    return "out.wav", "out.wav"
 
-examples=[['This paper describes ESPnet2-TTS, an end-to-end text-to-speech (E2E-TTS) toolkit. ESPnet2-TTS extends our earlier version, ESPnet-TTS, by adding many new features, including: on-the-fly flexible pre-processing, joint training with neural vocoders, and state-of-the-art TTS models with extensions like full-band E2E text-to-waveform modeling, which simplify the training pipeline and further enhance TTS performance. The unified design of our recipes enables users to quickly reproduce state-of-the-art E2E-TTS results',"english"]]
+title = "GroTTS"
+examples = [
+    ['Ze gingen mit klas noar waddendiek, over en deur bragel lopen.', 'gronings']
+]
 
 gr.Interface(
-    inference,
-    [gr.inputs.Textbox(label="input text",lines=10),gr.inputs.Radio(choices=["gronings"], type="value", default="gronings", label="language")],
-    gr.outputs.Audio(type="file", label="Output"),
+    inference,
+    [gr.inputs.Textbox(label="input text", lines=3), gr.inputs.Radio(choices=["gronings", "dutch"], type="value", default="gronings", label="language")],
+    [gr.outputs.Audio(type="file", label="Output"), gr.outputs.File()],
     title=title,
-    description=description,
-    article=article,
-    enable_queue=True,
     examples=examples
-).launch(debug=True)
+).launch(enable_queue=True)
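
For reference, a minimal sketch of exercising the updated script outside the Gradio UI. This is hypothetical and not part of the commit: it assumes app.py is importable as a module named app from the working directory, that the checkpoint and vocoder downloads triggered at import time succeed, and the sample sentence is taken from the examples list above.

import scipy.io.wavfile

# Importing app runs the two Text2Speech.from_pretrained(...) calls,
# so the first import downloads both Tacotron2 checkpoints plus the
# ParallelWaveGAN vocoder and is correspondingly slow.
from app import inference

# inference() synthesizes to out.wav and returns the same path twice,
# matching the two declared outputs (gr.outputs.Audio and gr.outputs.File).
audio_path, file_path = inference("Ze gingen mit klas noar waddendiek, over en deur bragel lopen.", "gronings")

# Read the result back to confirm something was written.
fs, wav = scipy.io.wavfile.read(audio_path)
print(fs, wav.shape)  # sample rate and length of the synthesized waveform

Note that inference() only writes out.wav inside the "gronings" and "dutch" branches; for any other lang value it would still return "out.wav", pointing at a stale or missing file.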