Vijish committed · verified
Commit 5f563af · 1 Parent(s): ad49cff

Upload app.py

Files changed (1)
  1. app.py +309 -0
app.py ADDED
@@ -0,0 +1,309 @@
+ import asyncio
+ import datetime
+ import logging
+ import os
+ import time
+ import traceback
+
+ import edge_tts
+ import gradio as gr
+ import librosa
+ import torch
+ from fairseq import checkpoint_utils
+
+ from config import Config
+ from lib.infer_pack.models import (
+     SynthesizerTrnMs256NSFsid,
+     SynthesizerTrnMs256NSFsid_nono,
+     SynthesizerTrnMs768NSFsid,
+     SynthesizerTrnMs768NSFsid_nono,
+ )
+ from rmvpe import RMVPE
+ from vc_infer_pipeline import VC
+
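+ # config, lib.infer_pack.models, rmvpe, and vc_infer_pipeline above are
+ # project-local modules from the RVC codebase, not PyPI packages.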
+ # Set logging levels
+ logging.getLogger("fairseq").setLevel(logging.WARNING)
+ logging.getLogger("numba").setLevel(logging.WARNING)
+ logging.getLogger("markdown_it").setLevel(logging.WARNING)
+ logging.getLogger("urllib3").setLevel(logging.WARNING)
+ logging.getLogger("matplotlib").setLevel(logging.WARNING)
+
+ limitation = os.getenv("SYSTEM") == "spaces"
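+ # "limitation" is True when running on Hugging Face Spaces (SYSTEM=spaces);
+ # it enables the 280-character text and 20-second audio caps enforced in tts().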
+
+ config = Config()
+
+ # Edge TTS
+ edge_output_filename = "edge_output.mp3"
+ tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())
+ tts_voices = ["mn-MN-BataaNeural", "mn-MN-YesuiNeural"]  # Specific voices
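+ # tts_voice_list holds the full edge-tts voice catalog, but only these two
+ # Mongolian voices are exposed in the UI dropdown.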
+
+ # RVC models
+ model_root = "weights"
+ models = [d for d in os.listdir(model_root) if os.path.isdir(f"{model_root}/{d}")]
+ models.sort()
+
+
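+ # Load one RVC model directory: expects weights/<model_name>/ to contain a
+ # .pth checkpoint (the synthesizer class is chosen from the checkpoint's
+ # "version" and "f0" fields) and, optionally, a .index retrieval file.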
+ def model_data(model_name):
+     # global n_spk, tgt_sr, net_g, vc, cpt, version, index_file
+     pth_path = [
+         f"{model_root}/{model_name}/{f}"
+         for f in os.listdir(f"{model_root}/{model_name}")
+         if f.endswith(".pth")
+     ][0]
+     print(f"Loading {pth_path}")
+     cpt = torch.load(pth_path, map_location="cpu")
+     tgt_sr = cpt["config"][-1]
+     cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
+     if_f0 = cpt.get("f0", 1)
+     version = cpt.get("version", "v1")
+     if version == "v1":
+         if if_f0 == 1:
+             net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
+         else:
+             net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+     elif version == "v2":
+         if if_f0 == 1:
+             net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
+         else:
+             net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
+     else:
+         raise ValueError("Unknown version")
+     del net_g.enc_q  # the posterior encoder is only needed for training
+     net_g.load_state_dict(cpt["weight"], strict=False)
+     print("Model loaded")
+     net_g.eval().to(config.device)
+     if config.is_half:
+         net_g = net_g.half()
+     else:
+         net_g = net_g.float()
+     vc = VC(tgt_sr, config)
+     # n_spk = cpt["config"][-3]
+
+     index_files = [
+         f"{model_root}/{model_name}/{f}"
+         for f in os.listdir(f"{model_root}/{model_name}")
+         if f.endswith(".index")
+     ]
+     if len(index_files) == 0:
+         print("No index file found")
+         index_file = ""
+     else:
+         index_file = index_files[0]
+         print(f"Index file found: {index_file}")
+
+     return tgt_sr, net_g, vc, version, index_file, if_f0
+
+
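+ # Load the HuBERT content encoder via fairseq (hubert_base.pt must be present
+ # in the working directory); RVC uses its output as the content representation.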
+ def load_hubert():
+     # global hubert_model
+     models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
+         ["hubert_base.pt"],
+         suffix="",
+     )
+     hubert_model = models[0]
+     hubert_model = hubert_model.to(config.device)
+     if config.is_half:
+         hubert_model = hubert_model.half()
+     else:
+         hubert_model = hubert_model.float()
+     return hubert_model.eval()
+
+
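+ # Full pipeline: synthesize speech with edge-tts, reload it at 16 kHz mono,
+ # then convert the voice with the selected RVC model.
+ # Returns (info message, edge-tts mp3 path, (sample rate, converted audio)).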
+ def tts(
+     model_name,
+     speed,
+     tts_text,
+     tts_voice,
+     f0_up_key,
+     f0_method,
+     index_rate,
+     protect,
+     filter_radius=3,
+     resample_sr=0,
+     rms_mix_rate=0.25,
+ ):
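+     # Defaults mirror the upstream RVC WebUI (assumed semantics): filter_radius
+     # median-filters the extracted pitch contour, resample_sr=0 keeps the
+     # model's native sample rate, and rms_mix_rate blends the input and output
+     # volume envelopes.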
+     print("------------------")
+     print(datetime.datetime.now())
+     print("tts_text:")
+     print(tts_text)
+     print(f"tts_voice: {tts_voice}, speed: {speed}")
+     print(f"Model name: {model_name}")
+     print(f"F0: {f0_method}, Key: {f0_up_key}, Index: {index_rate}, Protect: {protect}")
+     try:
+         if limitation and len(tts_text) > 280:
+             print("Error: Text too long")
+             return (
+                 f"Text must be at most 280 characters in this Hugging Face Space, but got {len(tts_text)} characters.",
+                 None,
+                 None,
+             )
+         t0 = time.time()
+         # edge-tts expects a signed percentage rate string, e.g. "+10%" or "-10%"
+         if speed >= 0:
+             speed_str = f"+{speed}%"
+         else:
+             speed_str = f"{speed}%"
+         asyncio.run(
+             edge_tts.Communicate(
+                 tts_text, tts_voice, rate=speed_str
+             ).save(edge_output_filename)
+         )
+         t1 = time.time()
+         edge_time = t1 - t0
+         # Load at 16 kHz mono, the input format HuBERT expects
+         audio, sr = librosa.load(edge_output_filename, sr=16000, mono=True)
+         duration = len(audio) / sr
+         print(f"Audio duration: {duration}s")
+         if limitation and duration >= 20:
+             print("Error: Audio too long")
+             return (
+                 f"Audio must be shorter than 20 seconds in this Hugging Face Space, but got {duration}s.",
+                 edge_output_filename,
+                 None,
+             )
+         f0_up_key = int(f0_up_key)
+
+         tgt_sr, net_g, vc, version, index_file, if_f0 = model_data(model_name)
+         if f0_method == "rmvpe":
+             vc.model_rmvpe = rmvpe_model
+         times = [0, 0, 0]
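+         # vc.pipeline runs the RVC conversion (HuBERT features, optional index
+         # retrieval, f0-conditioned synthesis); "times" collects per-stage timings.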
+         audio_opt = vc.pipeline(
+             hubert_model,
+             net_g,
+             0,
+             audio,
+             edge_output_filename,
+             times,
+             f0_up_key,
+             f0_method,
+             index_file,
+             # file_big_npy,
+             index_rate,
+             if_f0,
+             filter_radius,
+             tgt_sr,
+             resample_sr,
+             rms_mix_rate,
+             version,
+             protect,
+             None,
+         )
+         # Chained comparison: true only if resample_sr >= 16000 and differs from tgt_sr
+         if tgt_sr != resample_sr >= 16000:
+             tgt_sr = resample_sr
+         info = f"Success. Time: edge-tts: {edge_time}s, npy: {times[0]}s, f0: {times[1]}s, infer: {times[2]}s"
+         print(info)
+         return (
+             info,
+             edge_output_filename,
+             (tgt_sr, audio_opt),
+         )
+     except EOFError:
+         info = (
+             "It seems that the edge-tts output is not valid. "
+             "This may occur when the input text and the speaker do not match. "
+             "For example, you may have entered Japanese text but chosen a non-Japanese speaker."
+         )
+         print(info)
+         return info, None, None
+     except Exception:
+         info = traceback.format_exc()
+         print(info)
+         return info, None, None
+
+
+ print("Loading hubert model...")
+ hubert_model = load_hubert()
+ print("Hubert model loaded.")
+
+ print("Loading rmvpe model...")
+ rmvpe_model = RMVPE("rmvpe.pt", config.is_half, config.device)
+ print("rmvpe model loaded.")
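+ # Both hubert_base.pt and rmvpe.pt are loaded from the working directory and
+ # must be provided separately (this commit only adds app.py).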
+
+ # Intro markdown (defined here but not rendered anywhere in this file)
+ initial_md = """
+ # RVC text-to-speech demo
+
+ This is a text-to-speech demo of the RVC moe models from [rvc_okiba](https://huggingface.co/litagin/rvc_okiba), using [edge-tts](https://github.com/rany2/edge-tts) as the front end.
+
+ Input text ➡ [(edge-tts)](https://github.com/rany2/edge-tts) ➡ speech mp3 file ➡ [(RVC)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI) ➡ final output
+
+ This runs on the 🤗 server's CPU, so it may be slow.
+
+ Although the models are trained on Japanese voices and intended for Japanese text, they can also be used with other languages by choosing the corresponding edge-tts speaker (though possibly with a Japanese accent).
+
+ Input text is limited to 280 characters, and the output audio to 20 seconds, in this 🤗 Space.
+
+ [Visit this GitHub repo](https://github.com/litagin02/rvc-tts-webui) to run it locally with your own models and GPU!
+ """
+
+ app = gr.Blocks()
+ with app:
+     with gr.Row():
+         with gr.Column():
+             model_name = gr.Dropdown(
+                 label="Model (all models except those prefixed man-_ are female-voice models)",
+                 choices=models,
+                 value=models[0],
+             )
+             f0_key_up = gr.Number(
+                 label="Tune (+12 = 1 octave up from edge-tts; the best value depends on the model and speaker)",
+                 value=0,
+             )
+         with gr.Column():
+             f0_method = gr.Radio(
+                 label="Pitch extraction method (pm: very fast but low quality; rmvpe: a little slow but high quality)",
+                 choices=["pm", "rmvpe"],
+                 value="rmvpe",
+                 interactive=True,
+             )
+             index_rate = gr.Slider(
+                 minimum=0,
+                 maximum=1,
+                 label="Index rate",
+                 value=0.75,
+                 interactive=True,
+             )
+             protect0 = gr.Slider(
+                 minimum=0,
+                 maximum=0.5,
+                 label="Protect",
+                 value=0.33,
+                 step=0.01,
+                 interactive=True,
+             )
+     with gr.Row():
+         with gr.Column():
+             tts_voice = gr.Dropdown(
+                 label="Edge-tts speaker (format: language-Country-Name-Gender)",
+                 choices=tts_voices,
+                 allow_custom_value=False,
+                 value="mn-MN-BataaNeural",
+             )
+             speed = gr.Slider(
+                 minimum=-100,
+                 maximum=100,
+                 label="Speech speed (%)",
+                 value=0,
+                 step=10,
+                 interactive=True,
+             )
+             # Default prompt is Mongolian for "Please enter text."
+             tts_text = gr.Textbox(label="Input Text", value="Текстыг оруулна уу.")
+         with gr.Column():
+             but0 = gr.Button("Convert", variant="primary")
+             info_text = gr.Textbox(label="Output info")
+         with gr.Column():
+             edge_tts_output = gr.Audio(label="Edge Voice", type="filepath")
+             tts_output = gr.Audio(label="Result")
+     but0.click(
+         tts,
+         [
+             model_name,
+             speed,
+             tts_text,
+             tts_voice,
+             f0_key_up,
+             f0_method,
+             index_rate,
+             protect0,
+         ],
+         [info_text, edge_tts_output, tts_output],
+     )
+
+ app.launch()