Hev832 committed
Commit ea2887c · verified · 1 Parent(s): 94b75cf

Upload app.py

Files changed (1): app.py (+319 -0)
app.py ADDED
@@ -0,0 +1,319 @@
import asyncio
import datetime
import logging
import os
import time
import traceback

import edge_tts
import gradio as gr
import librosa
import torch
from fairseq import checkpoint_utils

from config import Config
from lib.infer_pack.models import (
    SynthesizerTrnMs256NSFsid,
    SynthesizerTrnMs256NSFsid_nono,
    SynthesizerTrnMs768NSFsid,
    SynthesizerTrnMs768NSFsid_nono,
)
from rmvpe import RMVPE
from vc_infer_pipeline import VC

# Silence noisy third-party loggers.
logging.getLogger("fairseq").setLevel(logging.WARNING)
logging.getLogger("numba").setLevel(logging.WARNING)
logging.getLogger("markdown_it").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("matplotlib").setLevel(logging.WARNING)

# True when running inside a Hugging Face Space; used to cap input size below.
limitation = os.getenv("SYSTEM") == "spaces"

config = Config()

edge_output_filename = "edge_output.mp3"
# Fetch the edge-tts voice catalogue once at startup.
tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())
tts_voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list]
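
# Entries in tts_voices look like "en-US-AriaNeural-Female" (illustrative):
# the edge-tts ShortName with the voice's Gender appended for display.
# tts() below strips the "-Gender" suffix again before synthesis.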

model_root = "weights"
# Every subdirectory of `weights` is treated as one selectable RVC model.
models = [
    d for d in os.listdir(model_root) if os.path.isdir(os.path.join(model_root, d))
]
if len(models) == 0:
    raise ValueError("No model found in `weights` folder")
models.sort()
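
# Expected layout (illustrative; the folder and file names are hypothetical):
#
#   weights/
#       MyVoice/
#           MyVoice.pth     # model checkpoint, required
#           added.index     # faiss retrieval index, optional
#
# model_data() below picks the first .pth and the first .index it finds.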


def model_data(model_name):
    # global n_spk, tgt_sr, net_g, vc, cpt, version, index_file
    pth_files = [
        os.path.join(model_root, model_name, f)
        for f in os.listdir(os.path.join(model_root, model_name))
        if f.endswith(".pth")
    ]
    if len(pth_files) == 0:
        raise ValueError(f"No pth file found in {model_root}/{model_name}")
    pth_path = pth_files[0]
    print(f"Loading {pth_path}")
    cpt = torch.load(pth_path, map_location="cpu")
    tgt_sr = cpt["config"][-1]
    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
    if_f0 = cpt.get("f0", 1)
    version = cpt.get("version", "v1")
    # v1 checkpoints use 256-dim content features, v2 use 768-dim; the
    # *_nono variants are for models trained without an F0 (pitch) curve.
    if version == "v1":
        if if_f0 == 1:
            net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
        else:
            net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
    elif version == "v2":
        if if_f0 == 1:
            net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
        else:
            net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
    else:
        raise ValueError("Unknown version")
    del net_g.enc_q  # the posterior encoder is only needed for training
    net_g.load_state_dict(cpt["weight"], strict=False)
    print("Model loaded")
    net_g.eval().to(config.device)
    if config.is_half:
        net_g = net_g.half()
    else:
        net_g = net_g.float()
    vc = VC(tgt_sr, config)
    # n_spk = cpt["config"][-3]

    index_files = [
        os.path.join(model_root, model_name, f)
        for f in os.listdir(os.path.join(model_root, model_name))
        if f.endswith(".index")
    ]
    if len(index_files) == 0:
        print("No index file found")
        index_file = ""
    else:
        index_file = index_files[0]
        print(f"Index file found: {index_file}")

    return tgt_sr, net_g, vc, version, index_file, if_f0
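
# Illustrative call (the model name is hypothetical):
#
#   tgt_sr, net_g, vc, version, index_file, if_f0 = model_data("MyVoice")
#
# Note that the checkpoint is re-read from disk on every call; nothing is
# cached, so switching models in the UI reloads the .pth file each time.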


def load_hubert():
    # Load the HuBERT content encoder that all RVC models share; it expects
    # hubert_base.pt in the working directory.
    global hubert_model
    models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
        ["hubert_base.pt"],
        suffix="",
    )
    hubert_model = models[0]
    hubert_model = hubert_model.to(config.device)
    if config.is_half:
        hubert_model = hubert_model.half()
    else:
        hubert_model = hubert_model.float()
    return hubert_model.eval()
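
# `hubert_base.pt` (and `rmvpe.pt` below) must already be present; this
# script does not download them. One way to fetch them, assuming they are
# mirrored in the usual RVC checkpoint repo (the repo id is an assumption,
# not something this commit states):
#
#   from huggingface_hub import hf_hub_download
#   hf_hub_download("lj1995/VoiceConversionWebUI", "hubert_base.pt", local_dir=".")
#   hf_hub_download("lj1995/VoiceConversionWebUI", "rmvpe.pt", local_dir=".")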


print("Loading hubert model...")
hubert_model = load_hubert()
print("Hubert model loaded.")

print("Loading rmvpe model...")
rmvpe_model = RMVPE("rmvpe.pt", config.is_half, config.device)
print("rmvpe model loaded.")


def tts(
    model_name,
    speed,
    tts_text,
    tts_voice,
    f0_up_key,
    f0_method,
    index_rate,
    protect,
    filter_radius=3,
    resample_sr=0,
    rms_mix_rate=0.25,
):
    print("------------------")
    print(datetime.datetime.now())
    print("tts_text:")
    print(tts_text)
    print(f"tts_voice: {tts_voice}")
    print(f"Model name: {model_name}")
    print(f"F0: {f0_method}, Key: {f0_up_key}, Index: {index_rate}, Protect: {protect}")
    try:
        if limitation and len(tts_text) > 280:
            print("Error: Text too long")
            return (
                f"Text must be at most 280 characters in this Hugging Face Space, but got {len(tts_text)} characters.",
                None,
                None,
            )
        tgt_sr, net_g, vc, version, index_file, if_f0 = model_data(model_name)
        t0 = time.time()
        # edge-tts expects a signed percentage, e.g. "+10%" or "-10%".
        if speed >= 0:
            speed_str = f"+{speed}%"
        else:
            speed_str = f"{speed}%"
        # Strip the "-Gender" display suffix to recover the raw edge-tts voice name.
        asyncio.run(
            edge_tts.Communicate(
                tts_text, "-".join(tts_voice.split("-")[:-1]), rate=speed_str
            ).save(edge_output_filename)
        )
        t1 = time.time()
        edge_time = t1 - t0
        # HuBERT expects 16 kHz mono input.
        audio, sr = librosa.load(edge_output_filename, sr=16000, mono=True)
        duration = len(audio) / sr
        print(f"Audio duration: {duration}s")
        if limitation and duration >= 20:
            print("Error: Audio too long")
            return (
                f"Audio must be shorter than 20 seconds in this Hugging Face Space, but got {duration}s.",
                edge_output_filename,
                None,
            )

        f0_up_key = int(f0_up_key)

        if not hubert_model:
            load_hubert()
        if f0_method == "rmvpe":
            vc.model_rmvpe = rmvpe_model
        times = [0, 0, 0]
        audio_opt = vc.pipeline(
            hubert_model,
            net_g,
            0,
            audio,
            edge_output_filename,
            times,
            f0_up_key,
            f0_method,
            index_file,
            # file_big_npy,
            index_rate,
            if_f0,
            filter_radius,
            tgt_sr,
            resample_sr,
            rms_mix_rate,
            version,
            protect,
            None,
        )
        # Chained comparison: resample only when a valid target rate
        # (>= 16000 Hz) was requested and it differs from the model's rate.
        if tgt_sr != resample_sr >= 16000:
            tgt_sr = resample_sr
        info = f"Success. Time: edge-tts: {edge_time}s, npy: {times[0]}s, f0: {times[1]}s, infer: {times[2]}s"
        print(info)
        return (
            info,
            edge_output_filename,
            (tgt_sr, audio_opt),
        )
    except EOFError:
        info = (
            "It seems that the edge-tts output is not valid. "
            "This can happen when the input text and the speaker do not match; "
            "for example, you may have entered Japanese text but chosen a non-Japanese speaker."
        )
        print(info)
        return info, None, None
    except Exception:
        info = traceback.format_exc()
        print(info)
        return info, None, None
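
# Illustrative direct call, bypassing the UI (text and voice are examples;
# keyword values mirror the defaults wired up below):
#
#   info, edge_mp3, result = tts(
#       model_name=models[0],
#       speed=0,
#       tts_text="Hello, world.",
#       tts_voice="en-US-AriaNeural-Female",
#       f0_up_key=0,
#       f0_method="rmvpe",
#       index_rate=1.0,
#       protect=0.33,
#   )
#   # On success, result is a (sample_rate, numpy_array) tuple that
#   # gr.Audio can render directly.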

initial_md = """
# RVC text-to-speech webui

This is a text-to-speech webui for RVC models.

Input text ➡ [edge-tts](https://github.com/rany2/edge-tts) ➡ speech mp3 file ➡ [RVC](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI) ➡ final output
"""

app = gr.Blocks(theme="Hev832/emerald", title="RVC-TTS")
with app:
    gr.Markdown(initial_md)
    with gr.Row():
        with gr.Column():
            model_name = gr.Dropdown(label="Model", choices=models, value=models[0])
            f0_key_up = gr.Number(
                label="Transpose (the best value depends on the model and speaker)",
                value=0,
            )
        with gr.Column():
            f0_method = gr.Radio(
                label="Pitch extraction method (rmvpe is the default)",
                choices=["rmvpe", "crepe"],  # harvest is too slow
                value="rmvpe",
                interactive=True,
            )
            index_rate = gr.Slider(
                minimum=0,
                maximum=1,
                label="Index rate",
                value=1,
                interactive=True,
            )
            protect0 = gr.Slider(
                minimum=0,
                maximum=0.5,
                label="Protect",
                value=0.33,
                step=0.01,
                interactive=True,
            )
    with gr.Row():
        with gr.Column():
            tts_voice = gr.Dropdown(
                label="Edge-tts speaker (format: language-Country-Name-Gender)",
                choices=tts_voices,
                allow_custom_value=False,
                value="ja-JP-NanamiNeural-Female",
            )
            speed = gr.Slider(
                minimum=-100,
                maximum=100,
                label="Speech speed (%)",
                value=0,
                step=10,
                interactive=True,
            )
            # Default text: "This is a demo of converting Japanese text to speech."
            tts_text = gr.Textbox(label="Input Text", value="これは日本語テキストから音声への変換デモです。")
        with gr.Column():
            but0 = gr.Button("Convert", variant="primary")
            info_text = gr.Textbox(label="Output info")
        with gr.Column():
            edge_tts_output = gr.Audio(label="Edge Voice", type="filepath")
            tts_output = gr.Audio(label="Result")
    but0.click(
        tts,
        [
            model_name,
            speed,
            tts_text,
            tts_voice,
            f0_key_up,
            f0_method,
            index_rate,
            protect0,
        ],
        [info_text, edge_tts_output, tts_output],
    )
    with gr.Row():
        examples = gr.Examples(
            examples_per_page=100,
            examples=[
                # "This is a demo of converting Japanese text to speech."
                ["これは日本語テキストから音声への変換デモです。", "ja-JP-NanamiNeural-Female"],
                [
                    "This is an English text-to-speech conversion demo.",
                    "en-US-AriaNeural-Female",
                ],
            ],
            inputs=[tts_text, tts_voice],
        )


app.launch()
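
With the `weights` folder populated and `hubert_base.pt` / `rmvpe.pt` in place, running `python app.py` builds the voice list, loads the shared HuBERT and RMVPE models once, and serves the Gradio UI on Gradio's default local port.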