Dupaja committed on
Commit
3851704
1 Parent(s): 0f44792

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +76 -114
app.py CHANGED
@@ -1,25 +1,8 @@
1
- # # # # # # # # # # # # # # # # # # # # # # # # # # # #
2
- # #
3
- # StyleTTS 2 Demo #
4
- # #
5
- # #
6
- # Copyright (c) 2023 mrfakename. All rights reserved. #
7
- # #
8
- # License : AGPL v3 #
9
- # Version : 2.0 #
10
- # Support : https://github.com/neuralvox/styletts2 #
11
- # #
12
- # # # # # # # # # # # # # # # # # # # # # # # # # # # #
13
-
14
-
15
  import gradio as gr
16
  import styletts2importable
17
- import ljspeechimportable
18
- import torch
19
- import os
20
- from tortoise.utils.text import split_and_recombine_text
21
  import numpy as np
22
- import pickle
 
23
  theme = gr.themes.Base(
24
  font=[gr.themes.GoogleFont('Libre Franklin'), gr.themes.GoogleFont('Public Sans'), 'system-ui', 'sans-serif'],
25
  )
@@ -27,26 +10,83 @@ voicelist = ['f-us-1', 'f-us-2', 'f-us-3', 'f-us-4', 'm-us-1', 'm-us-2', 'm-us-3
27
  voices = {}
28
  import phonemizer
29
  global_phonemizer = phonemizer.backend.EspeakBackend(language='en-us', preserve_punctuation=True, with_stress=True)
30
- # todo: cache computed style, load using pickle
31
- # if os.path.exists('voices.pkl'):
32
- # with open('voices.pkl', 'rb') as f:
33
- # voices = pickle.load(f)
34
- # else:
35
  for v in voicelist:
36
  voices[v] = styletts2importable.compute_style(f'voices/{v}.wav')
37
- # def synthesize(text, voice, multispeakersteps):
38
- # if text.strip() == "":
39
- # raise gr.Error("You must enter some text")
40
- # # if len(global_phonemizer.phonemize([text])) > 300:
41
- # if len(text) > 300:
42
- # raise gr.Error("Text must be under 300 characters")
43
- # v = voice.lower()
44
- # # return (24000, styletts2importable.inference(text, voices[v], alpha=0.3, beta=0.7, diffusion_steps=7, embedding_scale=1))
45
- # return (24000, styletts2importable.inference(text, voices[v], alpha=0.3, beta=0.7, diffusion_steps=multispeakersteps, embedding_scale=1))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
  def synthesize(text, voice, lngsteps, password, progress=gr.Progress()):
47
  if text.strip() == "":
48
  raise gr.Error("You must enter some text")
49
-
50
  texts = split_and_recombine_text(text)
51
  v = voice.lower()
52
  audios = []
@@ -54,94 +94,16 @@ def synthesize(text, voice, lngsteps, password, progress=gr.Progress()):
54
  audios.append(styletts2importable.inference(t, voices[v], alpha=0.3, beta=0.7, diffusion_steps=lngsteps, embedding_scale=1))
55
  return (24000, np.concatenate(audios))
56
 
57
- def clsynthesize(text, voice, vcsteps, progress=gr.Progress()):
58
- if text.strip() == "":
59
- raise gr.Error("You must enter some text")
60
- texts = split_and_recombine_text(text)
61
- audios = []
62
- for t in progress.tqdm(texts):
63
- audios.append(styletts2importable.inference(t, styletts2importable.compute_style(voice), alpha=0.3, beta=0.7, diffusion_steps=vcsteps, embedding_scale=1))
64
- return (24000, np.concatenate(audios))
65
- def ljsynthesize(text, steps, progress=gr.Progress()):
66
- noise = torch.randn(1,1,256).to('cuda' if torch.cuda.is_available() else 'cpu')
67
- # return (24000, ljspeechimportable.inference(text, noise, diffusion_steps=7, embedding_scale=1))
68
- if text.strip() == "":
69
- raise gr.Error("You must enter some text")
70
- texts = split_and_recombine_text(text)
71
- audios = []
72
- for t in progress.tqdm(texts):
73
- audios.append(ljspeechimportable.inference(t, noise, diffusion_steps=steps, embedding_scale=1))
74
- return (24000, np.concatenate(audios))
75
-
76
 
77
- with gr.Blocks() as vctk: # just realized it isn't vctk but libritts but i'm too lazy to change it rn
78
  with gr.Row():
79
  with gr.Column(scale=1):
80
  inp = gr.Textbox(label="Text", info="What would you like StyleTTS 2 to read? It works better on full sentences.", interactive=True)
81
  voice = gr.Dropdown(voicelist, label="Voice", info="Select a default voice.", value='m-us-2', interactive=True)
82
  multispeakersteps = gr.Slider(minimum=3, maximum=15, value=3, step=1, label="Diffusion Steps", info="Theoretically, higher should be better quality but slower, but we cannot notice a difference. Try with lower steps first - it is faster", interactive=True)
83
- # use_gruut = gr.Checkbox(label="Use alternate phonemizer (Gruut) - Experimental")
84
  with gr.Column(scale=1):
85
  btn = gr.Button("Synthesize", variant="primary")
86
  audio = gr.Audio(interactive=False, label="Synthesized Audio")
87
  btn.click(synthesize, inputs=[inp, voice, multispeakersteps], outputs=[audio], concurrency_limit=4)
88
- with gr.Blocks() as clone:
89
- with gr.Row():
90
- with gr.Column(scale=1):
91
- clinp = gr.Textbox(label="Text", info="What would you like StyleTTS 2 to read? It works better on full sentences.", interactive=True)
92
- clvoice = gr.Audio(label="Voice", interactive=True, type='filepath', max_length=300)
93
- vcsteps = gr.Slider(minimum=3, maximum=20, value=20, step=1, label="Diffusion Steps", info="Theoretically, higher should be better quality but slower, but we cannot notice a difference. Try with lower steps first - it is faster", interactive=True)
94
- with gr.Column(scale=1):
95
- clbtn = gr.Button("Synthesize", variant="primary")
96
- claudio = gr.Audio(interactive=False, label="Synthesized Audio")
97
- clbtn.click(clsynthesize, inputs=[clinp, clvoice, vcsteps], outputs=[claudio], concurrency_limit=4)
98
- # with gr.Blocks() as longText:
99
- # with gr.Row():
100
- # with gr.Column(scale=1):
101
- # lnginp = gr.Textbox(label="Text", info="What would you like StyleTTS 2 to read? It works better on full sentences.", interactive=True)
102
- # lngvoice = gr.Dropdown(voicelist, label="Voice", info="Select a default voice.", value='m-us-1', interactive=True)
103
- # lngsteps = gr.Slider(minimum=5, maximum=25, value=10, step=1, label="Diffusion Steps", info="Higher = better quality, but slower", interactive=True)
104
- # lngpwd = gr.Textbox(label="Access code", info="This feature is in beta. You need an access code to use it as it uses more resources and we would like to prevent abuse")
105
- # with gr.Column(scale=1):
106
- # lngbtn = gr.Button("Synthesize", variant="primary")
107
- # lngaudio = gr.Audio(interactive=False, label="Synthesized Audio")
108
- # lngbtn.click(longsynthesize, inputs=[lnginp, lngvoice, lngsteps, lngpwd], outputs=[lngaudio], concurrency_limit=4)
109
- with gr.Blocks() as lj:
110
- with gr.Row():
111
- with gr.Column(scale=1):
112
- ljinp = gr.Textbox(label="Text", info="What would you like StyleTTS 2 to read? It works better on full sentences.", interactive=True)
113
- ljsteps = gr.Slider(minimum=3, maximum=20, value=3, step=1, label="Diffusion Steps", info="Theoretically, higher should be better quality but slower, but we cannot notice a difference. Try with lower steps first - it is faster", interactive=True)
114
- with gr.Column(scale=1):
115
- ljbtn = gr.Button("Synthesize", variant="primary")
116
- ljaudio = gr.Audio(interactive=False, label="Synthesized Audio")
117
- ljbtn.click(ljsynthesize, inputs=[ljinp, ljsteps], outputs=[ljaudio], concurrency_limit=4)
118
- with gr.Blocks(title="StyleTTS 2", css="", theme=theme) as demo:
119
- gr.Markdown("""# StyleTTS 2
120
-
121
- [Paper](https://arxiv.org/abs/2306.07691) - [Samples](https://styletts2.github.io/) - [Code](https://github.com/yl4579/StyleTTS2)
122
-
123
- A free demo of StyleTTS 2. **I am not affiliated with the StyleTTS 2 Authors.**
124
-
125
- #### Help this space get to the top of HF's trending list! Please give this space a Like!
126
-
127
- **Before using this demo, you agree to inform the listeners that the speech samples are synthesized by the pre-trained models, unless you have the permission to use the voice you synthesize. That is, you agree to only use voices whose speakers grant the permission to have their voice cloned, either directly or by license before making synthesized voices public, or you have to publicly announce that these voices are synthesized if you do not have the permission to use these voices.**
128
-
129
- Is there a long queue on this space? Duplicate it and add a more powerful GPU to skip the wait! **Note: Thank you to Hugging Face for their generous GPU grant program!**
130
-
131
- **NOTE: StyleTTS 2 does better on longer texts.** For example, making it say "hi" will produce a lower-quality result than making it say a longer phrase.""")
132
- gr.DuplicateButton("Duplicate Space")
133
- # gr.TabbedInterface([vctk, clone, lj, longText], ['Multi-Voice', 'Voice Cloning', 'LJSpeech', 'Long Text [Beta]'])
134
- gr.TabbedInterface([vctk, clone, lj], ['Multi-Voice', 'Voice Cloning', 'LJSpeech', 'Long Text [Beta]'])
135
- gr.Markdown("""
136
- Demo by [mrfakename](https://twitter.com/realmrfakename). I am not affiliated with the StyleTTS 2 authors.
137
-
138
- Run this demo locally using Docker:
139
-
140
- ```bash
141
- docker run -it -p 7860:7860 --platform=linux/amd64 --gpus all registry.hf.space/styletts2-styletts2:latest python app.py
142
- ```
143
- """) # Please do not remove this line.
144
- if __name__ == "__main__":
145
- # demo.queue(api_open=False, max_size=15).launch(show_api=False)
146
- demo.queue(api_open=True, max_size=15).launch(show_api=True)
147
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
  import styletts2importable
 
 
 
 
3
  import numpy as np
4
+ import re
5
+
6
  theme = gr.themes.Base(
7
  font=[gr.themes.GoogleFont('Libre Franklin'), gr.themes.GoogleFont('Public Sans'), 'system-ui', 'sans-serif'],
8
  )
 
10
  voices = {}
11
  import phonemizer
12
  global_phonemizer = phonemizer.backend.EspeakBackend(language='en-us', preserve_punctuation=True, with_stress=True)
 
 
 
 
 
13
  for v in voicelist:
14
  voices[v] = styletts2importable.compute_style(f'voices/{v}.wav')
15
def split_and_recombine_text(text, desired_length=200, max_length=400):
    """Split text into chunks close to ``desired_length``, trying to keep sentences intact.

    Args:
        text: Raw input text. Whitespace is normalized and curly double
            quotes are converted to ASCII ``"`` before splitting.
        desired_length: Target chunk size in characters; a chunk is committed
            once it reaches this length at a sentence boundary.
        max_length: Hard cap; a split is forced when a chunk reaches this size.

    Returns:
        List of non-empty chunks, stripped of surrounding whitespace, with
        fragments containing only whitespace/punctuation removed.
    """
    # normalize text, remove redundant whitespace and convert non-ascii quotes to ascii
    text = re.sub(r'\n\n+', '\n', text)
    text = re.sub(r'\s+', ' ', text)
    text = re.sub(r'[“”]', '"', text)

    rv = []              # committed chunks
    in_quote = False     # currently inside a double-quoted span?
    current = ""         # chunk being accumulated
    split_pos = []       # positions of sentence boundaries seen inside `current`
    pos = -1             # index of the last character consumed from `text`
    end_pos = len(text) - 1

    def seek(delta):
        # Move the cursor by `delta` characters (negative = backwards), keeping
        # `current` and the quote state in sync with every character crossed;
        # returns the character now under the cursor.
        nonlocal pos, in_quote, current
        is_neg = delta < 0
        for _ in range(abs(delta)):
            if is_neg:
                pos -= 1
                current = current[:-1]
            else:
                pos += 1
                current += text[pos]
            if text[pos] == '"':
                in_quote = not in_quote
        return text[pos]

    def peek(delta):
        # Look `delta` characters ahead without moving the cursor.
        # NOTE(review): `p < end_pos` excludes the final character, so peeking
        # at it yields "" — this also keeps the `seek(2)` in the quote branch
        # below from running past the end of the text; confirm before changing.
        p = pos + delta
        return text[p] if p < end_pos and p >= 0 else ""

    def commit():
        # Finish the current chunk and reset the boundary bookkeeping.
        nonlocal rv, current, split_pos
        rv.append(current)
        current = ""
        split_pos = []

    while pos < end_pos:
        c = seek(1)
        # do we need to force a split?
        if len(current) >= max_length:
            if len(split_pos) > 0 and len(current) > (desired_length / 2):
                # we have at least one sentence and we are over half the desired length, seek back to the last split
                d = pos - split_pos[-1]
                seek(-d)
            else:
                # no full sentences, seek back until we are not in the middle of a word and split there
                while c not in '!?.\n ' and pos > 0 and len(current) > desired_length:
                    c = seek(-1)
            commit()
        # check for sentence boundaries
        elif not in_quote and (c in '!?\n' or (c == '.' and peek(1) in '\n ')):
            # seek forward if we have consecutive boundary markers but still within the max length
            while pos < len(text) - 1 and len(current) < max_length and peek(1) in '!?.':
                c = seek(1)
            split_pos.append(pos)
            if len(current) >= desired_length:
                commit()
        # treat end of quote as a boundary if it's followed by a space or newline
        elif in_quote and peek(1) == '"' and peek(2) in '\n ':
            seek(2)
            split_pos.append(pos)
    rv.append(current)  # flush whatever is left over

    # clean up, remove lines with only whitespace or punctuation
    rv = [s.strip() for s in rv]
    rv = [s for s in rv if len(s) > 0 and not re.match(r'^[\s\.,;:!?]*$', s)]

    return rv
85
+
86
def synthesize(text, voice, lngsteps, password=None, progress=gr.Progress()):
    """Synthesize speech for `text` with a precomputed default-voice style.

    Args:
        text: Input text; split into sentence-sized chunks before inference.
        voice: Name of a voice in the module-level `voices` dict
            (matched case-insensitively).
        lngsteps: Number of diffusion steps passed to the inference call.
        password: Unused legacy parameter. It now defaults to None because the
            Gradio wiring supplies only three inputs
            (`inputs=[inp, voice, multispeakersteps]`); without a default every
            button click would raise a TypeError.
        progress: Gradio progress tracker; wraps the per-chunk loop.

    Returns:
        Tuple of (24000, waveform) — sample rate in Hz and the concatenated
        audio of all chunks as a numpy array.

    Raises:
        gr.Error: If `text` is empty or whitespace-only.
    """
    if text.strip() == "":
        raise gr.Error("You must enter some text")

    texts = split_and_recombine_text(text)
    v = voice.lower()
    audios = []
    for t in progress.tqdm(texts):
        audios.append(styletts2importable.inference(t, voices[v], alpha=0.3, beta=0.7, diffusion_steps=lngsteps, embedding_scale=1))
    return (24000, np.concatenate(audios))
96
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97
 
98
# UI: a single-tab demo — text box, voice picker, and diffusion-step slider on
# the left; synthesize button and audio output on the right.
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1):
            inp = gr.Textbox(label="Text", info="What would you like StyleTTS 2 to read? It works better on full sentences.", interactive=True)
            voice = gr.Dropdown(voicelist, label="Voice", info="Select a default voice.", value='m-us-2', interactive=True)
            multispeakersteps = gr.Slider(minimum=3, maximum=15, value=3, step=1, label="Diffusion Steps", info="Theoretically, higher should be better quality but slower, but we cannot notice a difference. Try with lower steps first - it is faster", interactive=True)
        with gr.Column(scale=1):
            btn = gr.Button("Synthesize", variant="primary")
            audio = gr.Audio(interactive=False, label="Synthesized Audio")
    # Only three inputs are wired; `synthesize`'s remaining parameters must
    # have defaults.
    btn.click(synthesize, inputs=[inp, voice, multispeakersteps], outputs=[audio], concurrency_limit=4)

# Guard the launch so importing this module (e.g. for testing) does not start
# the server — only `python app.py` does.
if __name__ == "__main__":
    demo.queue(api_open=True, max_size=15).launch(show_api=True)