nevreal committed on
Commit b576d8c
1 Parent(s): 0db599b

Delete gui_v1.py

Files changed (1)
  1. gui_v1.py +0 -1070
gui_v1.py DELETED
@@ -1,1070 +0,0 @@
- import os
- import sys
- from dotenv import load_dotenv
- import shutil
-
- load_dotenv()
-
- os.environ["OMP_NUM_THREADS"] = "4"
- if sys.platform == "darwin":
-     os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
-
- now_dir = os.getcwd()
- sys.path.append(now_dir)
- import multiprocessing
-
- flag_vc = False
-
-
- def printt(strr, *args):
-     if len(args) == 0:
-         print(strr)
-     else:
-         print(strr % args)
-
-
- def phase_vocoder(a, b, fade_out, fade_in):
-     window = torch.sqrt(fade_out * fade_in)
-     fa = torch.fft.rfft(a * window)
-     fb = torch.fft.rfft(b * window)
-     absab = torch.abs(fa) + torch.abs(fb)
-     n = a.shape[0]
-     if n % 2 == 0:
-         absab[1:-1] *= 2
-     else:
-         absab[1:] *= 2
-     phia = torch.angle(fa)
-     phib = torch.angle(fb)
-     deltaphase = phib - phia
-     deltaphase = deltaphase - 2 * np.pi * torch.floor(deltaphase / 2 / np.pi + 0.5)
-     w = 2 * np.pi * torch.arange(n // 2 + 1).to(a) + deltaphase
-     t = torch.arange(n).unsqueeze(-1).to(a) / n
-     result = (
-         a * (fade_out**2)
-         + b * (fade_in**2)
-         + torch.sum(absab * torch.cos(w * t + phia), -1) * window / n
-     )
-     return result
-
-
- class Harvest(multiprocessing.Process):
-     def __init__(self, inp_q, opt_q):
-         multiprocessing.Process.__init__(self)
-         self.inp_q = inp_q
-         self.opt_q = opt_q
-
-     def run(self):
-         import numpy as np
-         import pyworld
-
-         while 1:
-             idx, x, res_f0, n_cpu, ts = self.inp_q.get()
-             f0, t = pyworld.harvest(
-                 x.astype(np.double),
-                 fs=16000,
-                 f0_ceil=1100,
-                 f0_floor=50,
-                 frame_period=10,
-             )
-             res_f0[idx] = f0
-             if len(res_f0.keys()) >= n_cpu:
-                 self.opt_q.put(ts)
-
-
- if __name__ == "__main__":
-     import json
-     import multiprocessing
-     import re
-     import threading
-     import time
-     import traceback
-     from multiprocessing import Queue, cpu_count
-     from queue import Empty
-
-     import librosa
-     from tools.torchgate import TorchGate
-     import numpy as np
-     import FreeSimpleGUI as sg
-     import sounddevice as sd
-     import torch
-     import torch.nn.functional as F
-     import torchaudio.transforms as tat
-
-     from infer.lib import rtrvc as rvc_for_realtime
-     from i18n.i18n import I18nAuto
-     from configs.config import Config
-
-     i18n = I18nAuto()
-
-     # device = rvc_for_realtime.config.device
-     # device = torch.device(
-     #     "cuda"
-     #     if torch.cuda.is_available()
-     #     else ("mps" if torch.backends.mps.is_available() else "cpu")
-     # )
-     current_dir = os.getcwd()
-     inp_q = Queue()
-     opt_q = Queue()
-     n_cpu = min(cpu_count(), 8)
-     for _ in range(n_cpu):
-         p = Harvest(inp_q, opt_q)
-         p.daemon = True
-         p.start()
-
-     class GUIConfig:
-         def __init__(self) -> None:
-             self.pth_path: str = ""
-             self.index_path: str = ""
-             self.pitch: int = 0
-             self.formant = 0.0
-             self.sr_type: str = "sr_model"
-             self.block_time: float = 0.25  # s
-             self.threhold: int = -60
-             self.crossfade_time: float = 0.05
-             self.extra_time: float = 2.5
-             self.I_noise_reduce: bool = False
-             self.O_noise_reduce: bool = False
-             self.use_pv: bool = False
-             self.rms_mix_rate: float = 0.0
-             self.index_rate: float = 0.0
-             self.n_cpu: int = min(n_cpu, 4)
-             self.f0method: str = "fcpe"
-             self.sg_hostapi: str = ""
-             self.wasapi_exclusive: bool = False
-             self.sg_input_device: str = ""
-             self.sg_output_device: str = ""
-
-     class GUI:
-         def __init__(self) -> None:
-             self.gui_config = GUIConfig()
-             self.config = Config()
-             self.function = "vc"
-             self.delay_time = 0
-             self.hostapis = None
-             self.input_devices = None
-             self.output_devices = None
-             self.input_devices_indices = None
-             self.output_devices_indices = None
-             self.stream = None
-             self.update_devices()
-             self.launcher()
-
-         def load(self):
-             try:
-                 if not os.path.exists("configs/inuse/config.json"):
-                     shutil.copy("configs/config.json", "configs/inuse/config.json")
-                 with open("configs/inuse/config.json", "r") as j:
-                     data = json.load(j)
-                     data["sr_model"] = data["sr_type"] == "sr_model"
-                     data["sr_device"] = data["sr_type"] == "sr_device"
-                     data["pm"] = data["f0method"] == "pm"
-                     data["harvest"] = data["f0method"] == "harvest"
-                     data["crepe"] = data["f0method"] == "crepe"
-                     data["rmvpe"] = data["f0method"] == "rmvpe"
-                     data["fcpe"] = data["f0method"] == "fcpe"
-                     if data["sg_hostapi"] in self.hostapis:
-                         self.update_devices(hostapi_name=data["sg_hostapi"])
-                         if (
-                             data["sg_input_device"] not in self.input_devices
-                             or data["sg_output_device"] not in self.output_devices
-                         ):
-                             self.update_devices()
-                             data["sg_hostapi"] = self.hostapis[0]
-                             data["sg_input_device"] = self.input_devices[
-                                 self.input_devices_indices.index(sd.default.device[0])
-                             ]
-                             data["sg_output_device"] = self.output_devices[
-                                 self.output_devices_indices.index(sd.default.device[1])
-                             ]
-                     else:
-                         data["sg_hostapi"] = self.hostapis[0]
-                         data["sg_input_device"] = self.input_devices[
-                             self.input_devices_indices.index(sd.default.device[0])
-                         ]
-                         data["sg_output_device"] = self.output_devices[
-                             self.output_devices_indices.index(sd.default.device[1])
-                         ]
-             except:
-                 with open("configs/inuse/config.json", "w") as j:
-                     data = {
-                         "pth_path": "",
-                         "index_path": "",
-                         "sg_hostapi": self.hostapis[0],
-                         "sg_wasapi_exclusive": False,
-                         "sg_input_device": self.input_devices[
-                             self.input_devices_indices.index(sd.default.device[0])
-                         ],
-                         "sg_output_device": self.output_devices[
-                             self.output_devices_indices.index(sd.default.device[1])
-                         ],
-                         "sr_type": "sr_model",
-                         "threhold": -60,
-                         "pitch": 0,
-                         "formant": 0.0,
-                         "index_rate": 0,
-                         "rms_mix_rate": 0,
-                         "block_time": 0.25,
-                         "crossfade_length": 0.05,
-                         "extra_time": 2.5,
-                         "n_cpu": 4,
-                         "f0method": "rmvpe",
-                         "use_jit": False,
-                         "use_pv": False,
-                     }
-                 data["sr_model"] = data["sr_type"] == "sr_model"
-                 data["sr_device"] = data["sr_type"] == "sr_device"
-                 data["pm"] = data["f0method"] == "pm"
-                 data["harvest"] = data["f0method"] == "harvest"
-                 data["crepe"] = data["f0method"] == "crepe"
-                 data["rmvpe"] = data["f0method"] == "rmvpe"
-                 data["fcpe"] = data["f0method"] == "fcpe"
-             return data
-
-         def launcher(self):
-             data = self.load()
-             self.config.use_jit = False  # data.get("use_jit", self.config.use_jit)
-             sg.theme("LightBlue3")
-             layout = [
-                 [
-                     sg.Frame(
-                         title=i18n("加载模型"),
-                         layout=[
-                             [
-                                 sg.Input(
-                                     default_text=data.get("pth_path", ""),
-                                     key="pth_path",
-                                 ),
-                                 sg.FileBrowse(
-                                     i18n("选择.pth文件"),
-                                     initial_folder=os.path.join(
-                                         os.getcwd(), "assets/weights"
-                                     ),
-                                     file_types=((". pth"),),
-                                 ),
-                             ],
-                             [
-                                 sg.Input(
-                                     default_text=data.get("index_path", ""),
-                                     key="index_path",
-                                 ),
-                                 sg.FileBrowse(
-                                     i18n("选择.index文件"),
-                                     initial_folder=os.path.join(os.getcwd(), "logs"),
-                                     file_types=((". index"),),
-                                 ),
-                             ],
-                         ],
-                     )
-                 ],
-                 [
-                     sg.Frame(
-                         layout=[
-                             [
-                                 sg.Text(i18n("设备类型")),
-                                 sg.Combo(
-                                     self.hostapis,
-                                     key="sg_hostapi",
-                                     default_value=data.get("sg_hostapi", ""),
-                                     enable_events=True,
-                                     size=(20, 1),
-                                 ),
-                                 sg.Checkbox(
-                                     i18n("独占 WASAPI 设备"),
-                                     key="sg_wasapi_exclusive",
-                                     default=data.get("sg_wasapi_exclusive", False),
-                                     enable_events=True,
-                                 ),
-                             ],
-                             [
-                                 sg.Text(i18n("输入设备")),
-                                 sg.Combo(
-                                     self.input_devices,
-                                     key="sg_input_device",
-                                     default_value=data.get("sg_input_device", ""),
-                                     enable_events=True,
-                                     size=(45, 1),
-                                 ),
-                             ],
-                             [
-                                 sg.Text(i18n("输出设备")),
-                                 sg.Combo(
-                                     self.output_devices,
-                                     key="sg_output_device",
-                                     default_value=data.get("sg_output_device", ""),
-                                     enable_events=True,
-                                     size=(45, 1),
-                                 ),
-                             ],
-                             [
-                                 sg.Button(i18n("重载设备列表"), key="reload_devices"),
-                                 sg.Radio(
-                                     i18n("使用模型采样率"),
-                                     "sr_type",
-                                     key="sr_model",
-                                     default=data.get("sr_model", True),
-                                     enable_events=True,
-                                 ),
-                                 sg.Radio(
-                                     i18n("使用设备采样率"),
-                                     "sr_type",
-                                     key="sr_device",
-                                     default=data.get("sr_device", False),
-                                     enable_events=True,
-                                 ),
-                                 sg.Text(i18n("采样率:")),
-                                 sg.Text("", key="sr_stream"),
-                             ],
-                         ],
-                         title=i18n("音频设备"),
-                     )
-                 ],
-                 [
-                     sg.Frame(
-                         layout=[
-                             [
-                                 sg.Text(i18n("响应阈值")),
-                                 sg.Slider(
-                                     range=(-60, 0),
-                                     key="threhold",
-                                     resolution=1,
-                                     orientation="h",
-                                     default_value=data.get("threhold", -60),
-                                     enable_events=True,
-                                 ),
-                             ],
-                             [
-                                 sg.Text(i18n("音调设置")),
-                                 sg.Slider(
-                                     range=(-16, 16),
-                                     key="pitch",
-                                     resolution=1,
-                                     orientation="h",
-                                     default_value=data.get("pitch", 0),
-                                     enable_events=True,
-                                 ),
-                             ],
-                             [
-                                 sg.Text(i18n("性别因子/声线粗细")),
-                                 sg.Slider(
-                                     range=(-2, 2),
-                                     key="formant",
-                                     resolution=0.05,
-                                     orientation="h",
-                                     default_value=data.get("formant", 0.0),
-                                     enable_events=True,
-                                 ),
-                             ],
-                             [
-                                 sg.Text(i18n("Index Rate")),
-                                 sg.Slider(
-                                     range=(0.0, 1.0),
-                                     key="index_rate",
-                                     resolution=0.01,
-                                     orientation="h",
-                                     default_value=data.get("index_rate", 0),
-                                     enable_events=True,
-                                 ),
-                             ],
-                             [
-                                 sg.Text(i18n("响度因子")),
-                                 sg.Slider(
-                                     range=(0.0, 1.0),
-                                     key="rms_mix_rate",
-                                     resolution=0.01,
-                                     orientation="h",
-                                     default_value=data.get("rms_mix_rate", 0),
-                                     enable_events=True,
-                                 ),
-                             ],
-                             [
-                                 sg.Text(i18n("音高算法")),
-                                 sg.Radio(
-                                     "pm",
-                                     "f0method",
-                                     key="pm",
-                                     default=data.get("pm", False),
-                                     enable_events=True,
-                                 ),
-                                 sg.Radio(
-                                     "harvest",
-                                     "f0method",
-                                     key="harvest",
-                                     default=data.get("harvest", False),
-                                     enable_events=True,
-                                 ),
-                                 sg.Radio(
-                                     "crepe",
-                                     "f0method",
-                                     key="crepe",
-                                     default=data.get("crepe", False),
-                                     enable_events=True,
-                                 ),
-                                 sg.Radio(
-                                     "rmvpe",
-                                     "f0method",
-                                     key="rmvpe",
-                                     default=data.get("rmvpe", False),
-                                     enable_events=True,
-                                 ),
-                                 sg.Radio(
-                                     "fcpe",
-                                     "f0method",
-                                     key="fcpe",
-                                     default=data.get("fcpe", True),
-                                     enable_events=True,
-                                 ),
-                             ],
-                         ],
-                         title=i18n("常规设置"),
-                     ),
-                     sg.Frame(
-                         layout=[
-                             [
-                                 sg.Text(i18n("采样长度")),
-                                 sg.Slider(
-                                     range=(0.02, 1.5),
-                                     key="block_time",
-                                     resolution=0.01,
-                                     orientation="h",
-                                     default_value=data.get("block_time", 0.25),
-                                     enable_events=True,
-                                 ),
-                             ],
-                             # [
-                             #     sg.Text("设备延迟"),
-                             #     sg.Slider(
-                             #         range=(0, 1),
-                             #         key="device_latency",
-                             #         resolution=0.001,
-                             #         orientation="h",
-                             #         default_value=data.get("device_latency", 0.1),
-                             #         enable_events=True,
-                             #     ),
-                             # ],
-                             [
-                                 sg.Text(i18n("harvest进程数")),
-                                 sg.Slider(
-                                     range=(1, n_cpu),
-                                     key="n_cpu",
-                                     resolution=1,
-                                     orientation="h",
-                                     default_value=data.get(
-                                         "n_cpu", min(self.gui_config.n_cpu, n_cpu)
-                                     ),
-                                     enable_events=True,
-                                 ),
-                             ],
-                             [
-                                 sg.Text(i18n("淡入淡出长度")),
-                                 sg.Slider(
-                                     range=(0.01, 0.15),
-                                     key="crossfade_length",
-                                     resolution=0.01,
-                                     orientation="h",
-                                     default_value=data.get("crossfade_length", 0.05),
-                                     enable_events=True,
-                                 ),
-                             ],
-                             [
-                                 sg.Text(i18n("额外推理时长")),
-                                 sg.Slider(
-                                     range=(0.05, 5.00),
-                                     key="extra_time",
-                                     resolution=0.01,
-                                     orientation="h",
-                                     default_value=data.get("extra_time", 2.5),
-                                     enable_events=True,
-                                 ),
-                             ],
-                             [
-                                 sg.Checkbox(
-                                     i18n("输入降噪"),
-                                     key="I_noise_reduce",
-                                     enable_events=True,
-                                 ),
-                                 sg.Checkbox(
-                                     i18n("输出降噪"),
-                                     key="O_noise_reduce",
-                                     enable_events=True,
-                                 ),
-                                 sg.Checkbox(
-                                     i18n("启用相位声码器"),
-                                     key="use_pv",
-                                     default=data.get("use_pv", False),
-                                     enable_events=True,
-                                 ),
-                                 # sg.Checkbox(
-                                 #     "JIT加速",
-                                 #     default=self.config.use_jit,
-                                 #     key="use_jit",
-                                 #     enable_events=False,
-                                 # ),
-                             ],
-                             # [sg.Text("注:首次使用JIT加速时,会出现卡顿,\n 并伴随一些噪音,但这是正常现象!")],
-                         ],
-                         title=i18n("性能设置"),
-                     ),
-                 ],
-                 [
-                     sg.Button(i18n("开始音频转换"), key="start_vc"),
-                     sg.Button(i18n("停止音频转换"), key="stop_vc"),
-                     sg.Radio(
-                         i18n("输入监听"),
-                         "function",
-                         key="im",
-                         default=False,
-                         enable_events=True,
-                     ),
-                     sg.Radio(
-                         i18n("输出变声"),
-                         "function",
-                         key="vc",
-                         default=True,
-                         enable_events=True,
-                     ),
-                     sg.Text(i18n("算法延迟(ms):")),
-                     sg.Text("0", key="delay_time"),
-                     sg.Text(i18n("推理时间(ms):")),
-                     sg.Text("0", key="infer_time"),
-                 ],
-             ]
-             self.window = sg.Window("RVC - GUI", layout=layout, finalize=True)
-             self.event_handler()
-
-         def event_handler(self):
-             global flag_vc
-             while True:
-                 event, values = self.window.read()
-                 if event == sg.WINDOW_CLOSED:
-                     self.stop_stream()
-                     exit()
-                 if event == "reload_devices" or event == "sg_hostapi":
-                     self.gui_config.sg_hostapi = values["sg_hostapi"]
-                     self.update_devices(hostapi_name=values["sg_hostapi"])
-                     if self.gui_config.sg_hostapi not in self.hostapis:
-                         self.gui_config.sg_hostapi = self.hostapis[0]
-                     self.window["sg_hostapi"].Update(values=self.hostapis)
-                     self.window["sg_hostapi"].Update(value=self.gui_config.sg_hostapi)
-                     if (
-                         self.gui_config.sg_input_device not in self.input_devices
-                         and len(self.input_devices) > 0
-                     ):
-                         self.gui_config.sg_input_device = self.input_devices[0]
-                     self.window["sg_input_device"].Update(values=self.input_devices)
-                     self.window["sg_input_device"].Update(
-                         value=self.gui_config.sg_input_device
-                     )
-                     if self.gui_config.sg_output_device not in self.output_devices:
-                         self.gui_config.sg_output_device = self.output_devices[0]
-                     self.window["sg_output_device"].Update(values=self.output_devices)
-                     self.window["sg_output_device"].Update(
-                         value=self.gui_config.sg_output_device
-                     )
-                 if event == "start_vc" and not flag_vc:
-                     if self.set_values(values) == True:
-                         printt("cuda_is_available: %s", torch.cuda.is_available())
-                         self.start_vc()
-                         settings = {
-                             "pth_path": values["pth_path"],
-                             "index_path": values["index_path"],
-                             "sg_hostapi": values["sg_hostapi"],
-                             "sg_wasapi_exclusive": values["sg_wasapi_exclusive"],
-                             "sg_input_device": values["sg_input_device"],
-                             "sg_output_device": values["sg_output_device"],
-                             "sr_type": ["sr_model", "sr_device"][
-                                 [
-                                     values["sr_model"],
-                                     values["sr_device"],
-                                 ].index(True)
-                             ],
-                             "threhold": values["threhold"],
-                             "pitch": values["pitch"],
-                             "rms_mix_rate": values["rms_mix_rate"],
-                             "index_rate": values["index_rate"],
-                             # "device_latency": values["device_latency"],
-                             "block_time": values["block_time"],
-                             "crossfade_length": values["crossfade_length"],
-                             "extra_time": values["extra_time"],
-                             "n_cpu": values["n_cpu"],
-                             # "use_jit": values["use_jit"],
-                             "use_jit": False,
-                             "use_pv": values["use_pv"],
-                             "f0method": ["pm", "harvest", "crepe", "rmvpe", "fcpe"][
-                                 [
-                                     values["pm"],
-                                     values["harvest"],
-                                     values["crepe"],
-                                     values["rmvpe"],
-                                     values["fcpe"],
-                                 ].index(True)
-                             ],
-                         }
-                         with open("configs/inuse/config.json", "w") as j:
-                             json.dump(settings, j)
-                         if self.stream is not None:
-                             self.delay_time = (
-                                 self.stream.latency[-1]
-                                 + values["block_time"]
-                                 + values["crossfade_length"]
-                                 + 0.01
-                             )
-                         if values["I_noise_reduce"]:
-                             self.delay_time += min(values["crossfade_length"], 0.04)
-                         self.window["sr_stream"].update(self.gui_config.samplerate)
-                         self.window["delay_time"].update(
-                             int(np.round(self.delay_time * 1000))
-                         )
-                 # Parameter hot update
-                 if event == "threhold":
-                     self.gui_config.threhold = values["threhold"]
-                 elif event == "pitch":
-                     self.gui_config.pitch = values["pitch"]
-                     if hasattr(self, "rvc"):
-                         self.rvc.change_key(values["pitch"])
-                 elif event == "formant":
-                     self.gui_config.formant = values["formant"]
-                     if hasattr(self, "rvc"):
-                         self.rvc.change_formant(values["formant"])
-                 elif event == "index_rate":
-                     self.gui_config.index_rate = values["index_rate"]
-                     if hasattr(self, "rvc"):
-                         self.rvc.change_index_rate(values["index_rate"])
-                 elif event == "rms_mix_rate":
-                     self.gui_config.rms_mix_rate = values["rms_mix_rate"]
-                 elif event in ["pm", "harvest", "crepe", "rmvpe", "fcpe"]:
-                     self.gui_config.f0method = event
-                 elif event == "I_noise_reduce":
-                     self.gui_config.I_noise_reduce = values["I_noise_reduce"]
-                     if self.stream is not None:
-                         self.delay_time += (
-                             1 if values["I_noise_reduce"] else -1
-                         ) * min(values["crossfade_length"], 0.04)
-                         self.window["delay_time"].update(
-                             int(np.round(self.delay_time * 1000))
-                         )
-                 elif event == "O_noise_reduce":
-                     self.gui_config.O_noise_reduce = values["O_noise_reduce"]
-                 elif event == "use_pv":
-                     self.gui_config.use_pv = values["use_pv"]
-                 elif event in ["vc", "im"]:
-                     self.function = event
-                 elif event == "stop_vc" or event != "start_vc":
-                     # Other parameters do not support hot update
-                     self.stop_stream()
-
-         def set_values(self, values):
-             if len(values["pth_path"].strip()) == 0:
-                 sg.popup(i18n("请选择pth文件"))
-                 return False
-             if len(values["index_path"].strip()) == 0:
-                 sg.popup(i18n("请选择index文件"))
-                 return False
-             pattern = re.compile("[^\x00-\x7F]+")
-             if pattern.findall(values["pth_path"]):
-                 sg.popup(i18n("pth文件路径不可包含中文"))
-                 return False
-             if pattern.findall(values["index_path"]):
-                 sg.popup(i18n("index文件路径不可包含中文"))
-                 return False
-             self.set_devices(values["sg_input_device"], values["sg_output_device"])
-             self.config.use_jit = False  # values["use_jit"]
-             # self.device_latency = values["device_latency"]
-             self.gui_config.sg_hostapi = values["sg_hostapi"]
-             self.gui_config.sg_wasapi_exclusive = values["sg_wasapi_exclusive"]
-             self.gui_config.sg_input_device = values["sg_input_device"]
-             self.gui_config.sg_output_device = values["sg_output_device"]
-             self.gui_config.pth_path = values["pth_path"]
-             self.gui_config.index_path = values["index_path"]
-             self.gui_config.sr_type = ["sr_model", "sr_device"][
-                 [
-                     values["sr_model"],
-                     values["sr_device"],
-                 ].index(True)
-             ]
-             self.gui_config.threhold = values["threhold"]
-             self.gui_config.pitch = values["pitch"]
-             self.gui_config.formant = values["formant"]
-             self.gui_config.block_time = values["block_time"]
-             self.gui_config.crossfade_time = values["crossfade_length"]
-             self.gui_config.extra_time = values["extra_time"]
-             self.gui_config.I_noise_reduce = values["I_noise_reduce"]
-             self.gui_config.O_noise_reduce = values["O_noise_reduce"]
-             self.gui_config.use_pv = values["use_pv"]
-             self.gui_config.rms_mix_rate = values["rms_mix_rate"]
-             self.gui_config.index_rate = values["index_rate"]
-             self.gui_config.n_cpu = values["n_cpu"]
-             self.gui_config.f0method = ["pm", "harvest", "crepe", "rmvpe", "fcpe"][
-                 [
-                     values["pm"],
-                     values["harvest"],
-                     values["crepe"],
-                     values["rmvpe"],
-                     values["fcpe"],
-                 ].index(True)
-             ]
-             return True
-
-         def start_vc(self):
-             torch.cuda.empty_cache()
-             self.rvc = rvc_for_realtime.RVC(
-                 self.gui_config.pitch,
-                 self.gui_config.formant,
-                 self.gui_config.pth_path,
-                 self.gui_config.index_path,
-                 self.gui_config.index_rate,
-                 self.gui_config.n_cpu,
-                 inp_q,
-                 opt_q,
-                 self.config,
-                 self.rvc if hasattr(self, "rvc") else None,
-             )
-             self.gui_config.samplerate = (
-                 self.rvc.tgt_sr
-                 if self.gui_config.sr_type == "sr_model"
-                 else self.get_device_samplerate()
-             )
-             self.gui_config.channels = self.get_device_channels()
-             self.zc = self.gui_config.samplerate // 100
-             self.block_frame = (
-                 int(
-                     np.round(
-                         self.gui_config.block_time
-                         * self.gui_config.samplerate
-                         / self.zc
-                     )
-                 )
-                 * self.zc
-             )
-             self.block_frame_16k = 160 * self.block_frame // self.zc
-             self.crossfade_frame = (
-                 int(
-                     np.round(
-                         self.gui_config.crossfade_time
-                         * self.gui_config.samplerate
-                         / self.zc
-                     )
-                 )
-                 * self.zc
-             )
-             self.sola_buffer_frame = min(self.crossfade_frame, 4 * self.zc)
-             self.sola_search_frame = self.zc
-             self.extra_frame = (
-                 int(
-                     np.round(
-                         self.gui_config.extra_time
-                         * self.gui_config.samplerate
-                         / self.zc
-                     )
-                 )
-                 * self.zc
-             )
-             self.input_wav: torch.Tensor = torch.zeros(
-                 self.extra_frame
-                 + self.crossfade_frame
-                 + self.sola_search_frame
-                 + self.block_frame,
-                 device=self.config.device,
-                 dtype=torch.float32,
-             )
-             self.input_wav_denoise: torch.Tensor = self.input_wav.clone()
-             self.input_wav_res: torch.Tensor = torch.zeros(
-                 160 * self.input_wav.shape[0] // self.zc,
-                 device=self.config.device,
-                 dtype=torch.float32,
-             )
-             self.rms_buffer: np.ndarray = np.zeros(4 * self.zc, dtype="float32")
-             self.sola_buffer: torch.Tensor = torch.zeros(
-                 self.sola_buffer_frame, device=self.config.device, dtype=torch.float32
-             )
-             self.nr_buffer: torch.Tensor = self.sola_buffer.clone()
-             self.output_buffer: torch.Tensor = self.input_wav.clone()
-             self.skip_head = self.extra_frame // self.zc
-             self.return_length = (
-                 self.block_frame + self.sola_buffer_frame + self.sola_search_frame
-             ) // self.zc
-             self.fade_in_window: torch.Tensor = (
-                 torch.sin(
-                     0.5
-                     * np.pi
-                     * torch.linspace(
-                         0.0,
-                         1.0,
-                         steps=self.sola_buffer_frame,
-                         device=self.config.device,
-                         dtype=torch.float32,
-                     )
-                 )
-                 ** 2
-             )
-             self.fade_out_window: torch.Tensor = 1 - self.fade_in_window
-             self.resampler = tat.Resample(
-                 orig_freq=self.gui_config.samplerate,
-                 new_freq=16000,
-                 dtype=torch.float32,
-             ).to(self.config.device)
-             if self.rvc.tgt_sr != self.gui_config.samplerate:
-                 self.resampler2 = tat.Resample(
-                     orig_freq=self.rvc.tgt_sr,
-                     new_freq=self.gui_config.samplerate,
-                     dtype=torch.float32,
-                 ).to(self.config.device)
-             else:
-                 self.resampler2 = None
-             self.tg = TorchGate(
-                 sr=self.gui_config.samplerate, n_fft=4 * self.zc, prop_decrease=0.9
-             ).to(self.config.device)
-             self.start_stream()
-
-         def start_stream(self):
-             global flag_vc
-             if not flag_vc:
-                 flag_vc = True
-                 if (
-                     "WASAPI" in self.gui_config.sg_hostapi
-                     and self.gui_config.sg_wasapi_exclusive
-                 ):
-                     extra_settings = sd.WasapiSettings(exclusive=True)
-                 else:
-                     extra_settings = None
-                 self.stream = sd.Stream(
-                     callback=self.audio_callback,
-                     blocksize=self.block_frame,
-                     samplerate=self.gui_config.samplerate,
-                     channels=self.gui_config.channels,
-                     dtype="float32",
-                     extra_settings=extra_settings,
-                 )
-                 self.stream.start()
-
-         def stop_stream(self):
-             global flag_vc
-             if flag_vc:
-                 flag_vc = False
-                 if self.stream is not None:
-                     self.stream.abort()
-                     self.stream.close()
-                     self.stream = None
-
-         def audio_callback(
-             self, indata: np.ndarray, outdata: np.ndarray, frames, times, status
-         ):
-             """
-             音频处理
-             """
-             global flag_vc
-             start_time = time.perf_counter()
-             indata = librosa.to_mono(indata.T)
-             if self.gui_config.threhold > -60:
-                 indata = np.append(self.rms_buffer, indata)
-                 rms = librosa.feature.rms(
-                     y=indata, frame_length=4 * self.zc, hop_length=self.zc
-                 )[:, 2:]
-                 self.rms_buffer[:] = indata[-4 * self.zc :]
-                 indata = indata[2 * self.zc - self.zc // 2 :]
-                 db_threhold = (
-                     librosa.amplitude_to_db(rms, ref=1.0)[0] < self.gui_config.threhold
-                 )
-                 for i in range(db_threhold.shape[0]):
-                     if db_threhold[i]:
-                         indata[i * self.zc : (i + 1) * self.zc] = 0
-                 indata = indata[self.zc // 2 :]
-             self.input_wav[: -self.block_frame] = self.input_wav[
-                 self.block_frame :
-             ].clone()
-             self.input_wav[-indata.shape[0] :] = torch.from_numpy(indata).to(
-                 self.config.device
-             )
-             self.input_wav_res[: -self.block_frame_16k] = self.input_wav_res[
-                 self.block_frame_16k :
-             ].clone()
-             # input noise reduction and resampling
-             if self.gui_config.I_noise_reduce:
-                 self.input_wav_denoise[: -self.block_frame] = self.input_wav_denoise[
-                     self.block_frame :
-                 ].clone()
-                 input_wav = self.input_wav[-self.sola_buffer_frame - self.block_frame :]
-                 input_wav = self.tg(
-                     input_wav.unsqueeze(0), self.input_wav.unsqueeze(0)
-                 ).squeeze(0)
-                 input_wav[: self.sola_buffer_frame] *= self.fade_in_window
-                 input_wav[: self.sola_buffer_frame] += (
-                     self.nr_buffer * self.fade_out_window
-                 )
-                 self.input_wav_denoise[-self.block_frame :] = input_wav[
-                     : self.block_frame
-                 ]
-                 self.nr_buffer[:] = input_wav[self.block_frame :]
-                 self.input_wav_res[-self.block_frame_16k - 160 :] = self.resampler(
-                     self.input_wav_denoise[-self.block_frame - 2 * self.zc :]
-                 )[160:]
-             else:
-                 self.input_wav_res[-160 * (indata.shape[0] // self.zc + 1) :] = (
-                     self.resampler(self.input_wav[-indata.shape[0] - 2 * self.zc :])[
-                         160:
-                     ]
-                 )
-             # infer
-             if self.function == "vc":
-                 infer_wav = self.rvc.infer(
-                     self.input_wav_res,
-                     self.block_frame_16k,
-                     self.skip_head,
-                     self.return_length,
-                     self.gui_config.f0method,
-                 )
-                 if self.resampler2 is not None:
-                     infer_wav = self.resampler2(infer_wav)
-             elif self.gui_config.I_noise_reduce:
-                 infer_wav = self.input_wav_denoise[self.extra_frame :].clone()
-             else:
-                 infer_wav = self.input_wav[self.extra_frame :].clone()
-             # output noise reduction
-             if self.gui_config.O_noise_reduce and self.function == "vc":
-                 self.output_buffer[: -self.block_frame] = self.output_buffer[
-                     self.block_frame :
-                 ].clone()
-                 self.output_buffer[-self.block_frame :] = infer_wav[-self.block_frame :]
-                 infer_wav = self.tg(
-                     infer_wav.unsqueeze(0), self.output_buffer.unsqueeze(0)
-                 ).squeeze(0)
-             # volume envelop mixing
-             if self.gui_config.rms_mix_rate < 1 and self.function == "vc":
-                 if self.gui_config.I_noise_reduce:
-                     input_wav = self.input_wav_denoise[self.extra_frame :]
-                 else:
-                     input_wav = self.input_wav[self.extra_frame :]
-                 rms1 = librosa.feature.rms(
-                     y=input_wav[: infer_wav.shape[0]].cpu().numpy(),
-                     frame_length=4 * self.zc,
-                     hop_length=self.zc,
-                 )
-                 rms1 = torch.from_numpy(rms1).to(self.config.device)
-                 rms1 = F.interpolate(
-                     rms1.unsqueeze(0),
-                     size=infer_wav.shape[0] + 1,
-                     mode="linear",
-                     align_corners=True,
-                 )[0, 0, :-1]
-                 rms2 = librosa.feature.rms(
-                     y=infer_wav[:].cpu().numpy(),
-                     frame_length=4 * self.zc,
-                     hop_length=self.zc,
-                 )
-                 rms2 = torch.from_numpy(rms2).to(self.config.device)
-                 rms2 = F.interpolate(
-                     rms2.unsqueeze(0),
-                     size=infer_wav.shape[0] + 1,
-                     mode="linear",
-                     align_corners=True,
-                 )[0, 0, :-1]
-                 rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-3)
-                 infer_wav *= torch.pow(
-                     rms1 / rms2, torch.tensor(1 - self.gui_config.rms_mix_rate)
-                 )
-             # SOLA algorithm from https://github.com/yxlllc/DDSP-SVC
-             conv_input = infer_wav[
-                 None, None, : self.sola_buffer_frame + self.sola_search_frame
-             ]
-             cor_nom = F.conv1d(conv_input, self.sola_buffer[None, None, :])
-             cor_den = torch.sqrt(
-                 F.conv1d(
-                     conv_input**2,
-                     torch.ones(1, 1, self.sola_buffer_frame, device=self.config.device),
-                 )
-                 + 1e-8
-             )
-             if sys.platform == "darwin":
-                 _, sola_offset = torch.max(cor_nom[0, 0] / cor_den[0, 0])
-                 sola_offset = sola_offset.item()
-             else:
-                 sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0])
-             printt("sola_offset = %d", int(sola_offset))
-             infer_wav = infer_wav[sola_offset:]
-             if "privateuseone" in str(self.config.device) or not self.gui_config.use_pv:
-                 infer_wav[: self.sola_buffer_frame] *= self.fade_in_window
-                 infer_wav[: self.sola_buffer_frame] += (
-                     self.sola_buffer * self.fade_out_window
-                 )
-             else:
-                 infer_wav[: self.sola_buffer_frame] = phase_vocoder(
-                     self.sola_buffer,
-                     infer_wav[: self.sola_buffer_frame],
-                     self.fade_out_window,
-                     self.fade_in_window,
-                 )
-             self.sola_buffer[:] = infer_wav[
-                 self.block_frame : self.block_frame + self.sola_buffer_frame
-             ]
-             outdata[:] = (
-                 infer_wav[: self.block_frame]
-                 .repeat(self.gui_config.channels, 1)
-                 .t()
-                 .cpu()
-                 .numpy()
-             )
-             total_time = time.perf_counter() - start_time
-             if flag_vc:
-                 self.window["infer_time"].update(int(total_time * 1000))
-             printt("Infer time: %.2f", total_time)
-
-         def update_devices(self, hostapi_name=None):
-             """获取设备列表"""
-             global flag_vc
-             flag_vc = False
-             sd._terminate()
-             sd._initialize()
-             devices = sd.query_devices()
-             hostapis = sd.query_hostapis()
-             for hostapi in hostapis:
-                 for device_idx in hostapi["devices"]:
-                     devices[device_idx]["hostapi_name"] = hostapi["name"]
-             self.hostapis = [hostapi["name"] for hostapi in hostapis]
-             if hostapi_name not in self.hostapis:
-                 hostapi_name = self.hostapis[0]
-             self.input_devices = [
-                 d["name"]
-                 for d in devices
-                 if d["max_input_channels"] > 0 and d["hostapi_name"] == hostapi_name
-             ]
-             self.output_devices = [
-                 d["name"]
-                 for d in devices
-                 if d["max_output_channels"] > 0 and d["hostapi_name"] == hostapi_name
-             ]
-             self.input_devices_indices = [
-                 d["index"] if "index" in d else d["name"]
-                 for d in devices
-                 if d["max_input_channels"] > 0 and d["hostapi_name"] == hostapi_name
-             ]
-             self.output_devices_indices = [
-                 d["index"] if "index" in d else d["name"]
-                 for d in devices
-                 if d["max_output_channels"] > 0 and d["hostapi_name"] == hostapi_name
-             ]
-
-         def set_devices(self, input_device, output_device):
-             """设置输出设备"""
-             sd.default.device[0] = self.input_devices_indices[
-                 self.input_devices.index(input_device)
-             ]
-             sd.default.device[1] = self.output_devices_indices[
-                 self.output_devices.index(output_device)
-             ]
-             printt("Input device: %s:%s", str(sd.default.device[0]), input_device)
-             printt("Output device: %s:%s", str(sd.default.device[1]), output_device)
-
-         def get_device_samplerate(self):
-             return int(
-                 sd.query_devices(device=sd.default.device[0])["default_samplerate"]
-             )
-
-         def get_device_channels(self):
-             max_input_channels = sd.query_devices(device=sd.default.device[0])[
-                 "max_input_channels"
-             ]
-             max_output_channels = sd.query_devices(device=sd.default.device[1])[
-                 "max_output_channels"
-             ]
-             return min(max_input_channels, max_output_channels, 2)
-
-     gui = GUI()