wietsedv committed
Commit 38a727d
1 Parent(s): 3de0ad2

initial version

Files changed (3)
  1. app.py +84 -0
  2. packages.txt +2 -0
  3. requirements.txt +5 -0
app.py ADDED
@@ -0,0 +1,84 @@
+ import gradio as gr
+ import time
+ import urllib.request
+ from pathlib import Path
+ import os
+ import torch
+ import scipy.io.wavfile
+ from espnet2.bin.tts_inference import Text2Speech
+ from espnet2.utils.types import str_or_none
+
+
+ def load_model(model_tag, vocoder_tag):
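+     # Download the ESPnet TTS model and the matching Parallel WaveGAN vocoder from the Hugging Face Hub, then build a Text2Speech instance.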
+     from espnet_model_zoo.downloader import ModelDownloader
+
+     kwargs = {}
+
+     # Model
+     d = ModelDownloader()
+     kwargs = d.download_and_unpack(model_tag)
+
+     # Vocoder
+     download_dir = Path(os.path.expanduser("~/.cache/parallel_wavegan"))
+     vocoder_dir = download_dir / vocoder_tag
+     os.makedirs(vocoder_dir, exist_ok=True)
+
+     kwargs["vocoder_config"] = vocoder_dir / "config.yml"
+     if not kwargs["vocoder_config"].exists():
+         urllib.request.urlretrieve(f"https://huggingface.co/{vocoder_tag}/resolve/main/config.yml", kwargs["vocoder_config"])
+
+     kwargs["vocoder_file"] = vocoder_dir / "checkpoint-50000steps.pkl"
+     if not kwargs["vocoder_file"].exists():
+         urllib.request.urlretrieve(f"https://huggingface.co/{vocoder_tag}/resolve/main/checkpoint-50000steps.pkl", kwargs["vocoder_file"])
+
+     return Text2Speech(
+         **kwargs,
+         device="cpu",
+         threshold=0.5,
+         minlenratio=0.0,
+         maxlenratio=10.0,
+         use_att_constraint=True,
+         backward_window=1,
+         forward_window=4,
+     )
+
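+ # Load the Gronings and Dutch Tacotron 2 models with their Parallel WaveGAN vocoders at startup; English uses a pretrained ESPnet LJSpeech model.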
+ gos_text2speech = load_model('https://huggingface.co/wietsedv/tacotron2-gronings/resolve/main/tts_ljspeech_finetune_tacotron2.v5_train.loss.ave.zip', 'wietsedv/parallelwavegan-gronings')
+ nld_text2speech = load_model('https://huggingface.co/wietsedv/tacotron2-dutch/resolve/main/tts_ljspeech_finetune_tacotron2.v5_train.loss.ave.zip', 'wietsedv/parallelwavegan-dutch')
+ eng_text2speech = Text2Speech.from_pretrained(
+     model_tag="kan-bayashi/ljspeech_tacotron2",
+     vocoder_tag="parallel_wavegan/ljspeech_parallel_wavegan.v3",
+     device="cpu",
+     threshold=0.5,
+     minlenratio=0.0,
+     maxlenratio=10.0,
+     use_att_constraint=True,
+     backward_window=1,
+     forward_window=4,
+ )
+
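+ # Synthesize the input text with the model for the selected language and write the waveform to out.wav.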
+ def inference(text, lang):
+     with torch.no_grad():
+         if lang == "gronings":
+             wav = gos_text2speech(text)["wav"]
+             scipy.io.wavfile.write("out.wav", gos_text2speech.fs, wav.view(-1).cpu().numpy())
+         if lang == "dutch":
+             wav = nld_text2speech(text)["wav"]
+             scipy.io.wavfile.write("out.wav", nld_text2speech.fs, wav.view(-1).cpu().numpy())
+         if lang == "english":
+             wav = eng_text2speech(text)["wav"]
+             scipy.io.wavfile.write("out.wav", eng_text2speech.fs, wav.view(-1).cpu().numpy())
+
+     return "out.wav", "out.wav"
+
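+ # Gradio interface: a text box and a language selector as inputs; the audio is returned both as a playable clip and as a downloadable file.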
+ title = "GroTTS"
+ examples = [
+     ['Ze gingen mit klas noar Waddendiek. Over en deur bragel lopen.', 'gronings']
+ ]
+
+ gr.Interface(
+     inference,
+     [gr.inputs.Textbox(label="input text", lines=3), gr.inputs.Radio(choices=["gronings", "dutch", "english"], type="value", default="gronings", label="language")],
+     [gr.outputs.Audio(type="file", label="Output"), gr.outputs.File()],
+     title=title,
+     examples=examples
+ ).launch(enable_queue=True, debug=True)
packages.txt ADDED
@@ -0,0 +1,2 @@
+ cmake
+ libsndfile1
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ espnet==0.10.3
+ parallel_wavegan==0.5.3
+ espnet_model_zoo
+ scipy
+ torch