Riko arudianshā committed on
Commit
f865dbd
1 Parent(s): cefb295

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +2094 -0
app.py ADDED
@@ -0,0 +1,2094 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# --- Module bootstrap: imports, environment setup, scratch directories, and
# --- persisted formant-shift state loaded from csvdb/formanting.csv.
import subprocess, torch, os, traceback, sys, warnings, shutil, numpy as np
from mega import Mega
os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1"  # keep local servers off any system proxy
import threading
from time import sleep
from subprocess import Popen
import faiss
from random import shuffle
import json, datetime, requests
from gtts import gTTS
now_dir = os.getcwd()
sys.path.append(now_dir)  # make sibling project modules (utils, config, ...) importable
tmp = os.path.join(now_dir, "TEMP")
shutil.rmtree(tmp, ignore_errors=True)  # start with a clean scratch dir each launch
shutil.rmtree("%s/runtime/Lib/site-packages/infer_pack" % (now_dir), ignore_errors=True)
os.makedirs(tmp, exist_ok=True)
os.makedirs(os.path.join(now_dir, "logs"), exist_ok=True)
os.makedirs(os.path.join(now_dir, "weights"), exist_ok=True)
os.environ["TEMP"] = tmp
warnings.filterwarnings("ignore")
torch.manual_seed(114514)  # fixed seed for reproducible inference

import signal

import math

from utils import load_audio, CSVutil

# Module-level formant-shift settings shared by the handlers below.
global DoFormant, Quefrency, Timbre

if not os.path.isdir('csvdb/'):
    # First run: create the tiny CSV "database" files used to persist state.
    os.makedirs('csvdb')
    frmnt, stp = open("csvdb/formanting.csv", 'w'), open("csvdb/stop.csv", 'w')
    frmnt.close()
    stp.close()

try:
    DoFormant, Quefrency, Timbre = CSVutil('csvdb/formanting.csv', 'r', 'formanting')
    # CSV stores booleans as text; coerce 'true'/'false' back to bool, pass
    # anything else through unchanged.
    DoFormant = (
        lambda DoFormant: True if DoFormant.lower() == 'true' else (False if DoFormant.lower() == 'false' else DoFormant)
    )(DoFormant)
except (ValueError, TypeError, IndexError):
    # Missing/corrupt CSV: fall back to defaults and rewrite the file.
    DoFormant, Quefrency, Timbre = False, 1.0, 1.0
    CSVutil('csvdb/formanting.csv', 'w+', 'formanting', DoFormant, Quefrency, Timbre)
46
def download_models():
    """Download the hubert_base.pt and rmvpe.pt checkpoints into the current
    directory when they are not already present.

    Raises:
        Exception: if either HTTP download returns a non-200 status code.
    """
    def _fetch(url, dest, label):
        # Skip files that already exist; download is all-or-nothing otherwise.
        if os.path.isfile(dest):
            return
        # Fix: the original requests.get had no timeout and could hang forever.
        response = requests.get(url, timeout=300)
        if response.status_code == 200:
            with open(dest, 'wb') as f:
                f.write(response.content)
            print("Downloaded %s model file successfully. File saved to %s." % (label, dest))
        else:
            raise Exception("Failed to download %s model file. Status code: " % label + str(response.status_code) + ".")

    # Hubert base model (content encoder).
    _fetch(
        'https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt',
        './hubert_base.pt',
        'hubert base',
    )
    # RMVPE pitch-extraction model.
    _fetch(
        'https://drive.usercontent.google.com/download?id=1Hkn4kNuVFRCNQwyxQFRtmzmMBGpQxptI&export=download&authuser=0&confirm=t&uuid=0b3a40de-465b-4c65-8c41-135b0b45c3f7&at=APZUnTV3lA3LnyTbeuduura6Dmi2:1693724254058',
        './rmvpe.pt',
        'rmvpe',
    )
68
+
69
# Ensure required model checkpoints exist before the UI starts.
download_models()

print("\n-------------------------------\n 🗣️ RVC Easy GUI (Colab Edition)\n-------------------------------\n")




print("\n----------\n other repository : https://github.com/Tiger14n/RVC-GUI : https://github.com/HoshioPilio/Retrieval-based-Voice-Conversion : https://github.com/Mangio621/Mangio-RVC-Fork : https://github.com/IAHispano/Applio-RVC-Fork : https://github.com/777gt/-EVC-\n----------\n")
77
+
78
+
79
def formant_apply(qfrency, tmbre):
    """Enable formant shifting with the given quefrency/timbre values and
    persist the setting to csvdb/formanting.csv.

    Returns a pair of gradio-style update dicts echoing the applied values.
    """
    # Bug fix: without this global statement the assignments below created
    # function-locals and the module-level state was never actually updated.
    global DoFormant, Quefrency, Timbre
    Quefrency = qfrency
    Timbre = tmbre
    DoFormant = True
    CSVutil('csvdb/formanting.csv', 'w+', 'formanting', DoFormant, qfrency, tmbre)

    return ({"value": Quefrency, "__type__": "update"}, {"value": Timbre, "__type__": "update"})
86
+
87
def get_fshift_presets():
    """Collect every .txt preset file under ./formantshiftcfg/ (recursive).

    Returns a list of forward-slash paths, or the empty string '' when no
    presets are found (callers rely on the empty-string sentinel).
    """
    presets = [
        os.path.join(dirpath, fname).replace('\\', '/')
        for dirpath, _, filenames in os.walk("./formantshiftcfg/")
        for fname in filenames
        if fname.endswith(".txt")
    ]
    return presets if presets else ''
98
+
99
+
100
+
101
def formant_enabled(cbox, qfrency, tmbre, frmntapply, formantpreset, formant_refresh_button):
    """Checkbox handler: persist the formant-shift on/off state and show or
    hide the six dependent controls.

    Returns 7 gradio update dicts: the checkbox value followed by six
    visibility updates that track the checkbox state.
    """
    global DoFormant
    DoFormant = True if cbox else False
    CSVutil('csvdb/formanting.csv', 'w+', 'formanting', DoFormant, qfrency, tmbre)
    # Bug fix: the enabled branch previously returned only 6 updates while
    # the disabled branch returned 7, handing gradio a mismatched output
    # tuple whenever the box was ticked. Both paths now return 7 items.
    visibility = [{"visible": DoFormant, "__type__": "update"} for _ in range(6)]
    return tuple([{"value": DoFormant, "__type__": "update"}] + visibility)
134
+
135
+
136
+
137
def preset_apply(preset, qfer, tmbr):
    """Load quefrency (line 1) and timbre (line 2) from a preset file and
    apply them via formant_apply; when no preset path is given, fall back to
    the passed-in values.

    Returns gradio update dicts for the two sliders.
    """
    if str(preset) != '':
        with open(str(preset), 'r') as p:
            content = p.readlines()
            # Bug fix: the second line used to keep its trailing newline,
            # which then leaked into the persisted CSV and the UI value.
            qfer, tmbr = content[0].strip(), content[1].strip()
            formant_apply(qfer, tmbr)
    return ({"value": qfer, "__type__": "update"}, {"value": tmbr, "__type__": "update"})
147
+
148
def update_fshift_presets(preset, qfrency, tmbre):
    """Refresh the preset dropdown choices and apply the selected preset.

    Bug fix: the original first called preset_apply(), which overwrote
    qfrency/tmbre with gradio update *dicts*, then re-read and re-applied
    the preset file a second time. The preset file is now read and applied
    exactly once, and the returned slider values are plain numbers/strings.
    """
    if str(preset) != '':
        with open(str(preset), 'r') as p:
            content = p.readlines()
            qfrency, tmbre = content[0].split('\n')[0], content[1]
            formant_apply(qfrency, tmbre)
    return (
        {"choices": get_fshift_presets(), "__type__": "update"},
        {"value": qfrency, "__type__": "update"},
        {"value": tmbre, "__type__": "update"},
    )
165
+
166
i18n = I18nAuto()  # NOTE(review): I18nAuto is never imported in this file — presumably `from i18n import I18nAuto` was dropped; verify.
#i18n.print()
# Detect whether a usable NVIDIA GPU is present for training / accelerated inference.
ngpu = torch.cuda.device_count()
gpu_infos = []
mem = []
if (not torch.cuda.is_available()) or ngpu == 0:
    if_gpu_ok = False
else:
    if_gpu_ok = False
    for i in range(ngpu):
        gpu_name = torch.cuda.get_device_name(i)
        # Substring allow-list of GPU families considered fast enough.
        if (
            "10" in gpu_name
            or "16" in gpu_name
            or "20" in gpu_name
            or "30" in gpu_name
            or "40" in gpu_name
            or "A2" in gpu_name.upper()
            or "A3" in gpu_name.upper()
            or "A4" in gpu_name.upper()
            or "P4" in gpu_name.upper()
            or "A50" in gpu_name.upper()
            or "A60" in gpu_name.upper()
            or "70" in gpu_name
            or "80" in gpu_name
            or "90" in gpu_name
            or "M4" in gpu_name.upper()
            or "T4" in gpu_name.upper()
            or "TITAN" in gpu_name.upper()
        ): # A10#A100#V100#A40#P40#M40#K80#A4500
            if_gpu_ok = True  # at least one usable NVIDIA card found
            gpu_infos.append("%s\t%s" % (i, gpu_name))
            # Total memory in GiB, rounded (+0.4 biases toward nearest int).
            mem.append(
                int(
                    torch.cuda.get_device_properties(i).total_memory
                    / 1024
                    / 1024
                    / 1024
                    + 0.4
                )
            )
if if_gpu_ok == True and len(gpu_infos) > 0:
    gpu_info = "\n".join(gpu_infos)
    # Default training batch size: half of the smallest card's GiB.
    default_batch_size = min(mem) // 2
else:
    gpu_info = i18n("很遗憾您这没有能用的显卡来支持您训练")
    default_batch_size = 1
# "0-1-2" style device list; i[0] is the index digit of "idx\tname".
gpus = "-".join([i[0] for i in gpu_infos])
215
# Project-local model/pipeline imports (deferred until after GPU probing).
from lib.infer_pack.models import (
    SynthesizerTrnMs256NSFsid,
    SynthesizerTrnMs256NSFsid_nono,
    SynthesizerTrnMs768NSFsid,
    SynthesizerTrnMs768NSFsid_nono,
)
import soundfile as sf
from fairseq import checkpoint_utils
import gradio as gr
import logging
from vc_infer_pipeline import VC
from config import Config

config = Config()
# from trainset_preprocess_pipeline import PreProcess
logging.getLogger("numba").setLevel(logging.WARNING)  # silence numba chatter

hubert_model = None  # lazily loaded by load_hubert() on first conversion
233
+
234
def load_hubert():
    """Load hubert_base.pt onto config.device (half precision when
    config.is_half is set), put it in eval mode, and cache it in the
    module-global hubert_model."""
    global hubert_model
    models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
        ["hubert_base.pt"],
        suffix="",
    )
    model = models[0].to(config.device)
    hubert_model = model.half() if config.is_half else model.float()
    hubert_model.eval()
247
+
248
+
249
# Initial scan for voice models (weights/*.pth) and feature indexes
# (logs/**/*.index, skipping intermediate "trained" indexes).
weight_root = "weights"
index_root = "logs"
names = []
for name in os.listdir(weight_root):
    if name.endswith(".pth"):
        names.append(name)
index_paths = []
for root, dirs, files in os.walk(index_root, topdown=False):
    for name in files:
        if name.endswith(".index") and "trained" not in name:
            index_paths.append("%s/%s" % (root, name))
260
+
261
+
262
+
263
def vc_single(
    sid,
    input_audio_path,
    f0_up_key,
    f0_file,
    f0_method,
    file_index,
    index_rate,
    filter_radius,
    resample_sr,
    rms_mix_rate,
    protect,
    crepe_hop_length,
):  # spk_item, input_audio0, vc_transform0,f0_file,f0method0
    """Convert one audio file with the currently loaded voice model.

    Returns (info_message, (sample_rate, converted_audio)); on failure the
    traceback string and (None, None).
    """
    global tgt_sr, net_g, vc, hubert_model, version
    if input_audio_path is None:
        return "You need to upload an audio", None
    f0_up_key = int(f0_up_key)
    try:
        audio = load_audio(input_audio_path, 16000, DoFormant, Quefrency, Timbre)
        # Normalize peaks above ~0.95 to avoid clipping downstream.
        audio_max = np.abs(audio).max() / 0.95
        if audio_max > 1:
            audio /= audio_max
        times = [0, 0, 0]  # npy / f0 / infer timings filled by the pipeline
        if hubert_model is None:  # lazy load on first use
            load_hubert()
        if_f0 = cpt.get("f0", 1)
        # Clean up a hand-pasted index path (stray quotes/whitespace) and
        # map "trained" indexes to their usable "added" counterpart.
        file_index = (
            file_index.strip(" ")
            .strip('"')
            .strip("\n")
            .strip('"')
            .strip(" ")
            .replace("trained", "added")
        )
        audio_opt = vc.pipeline(
            hubert_model,
            net_g,
            sid,
            audio,
            input_audio_path,
            times,
            f0_up_key,
            f0_method,
            file_index,
            index_rate,
            if_f0,
            filter_radius,
            tgt_sr,
            resample_sr,
            rms_mix_rate,
            version,
            protect,
            crepe_hop_length,
            f0_file=f0_file,
        )
        if resample_sr >= 16000 and tgt_sr != resample_sr:
            tgt_sr = resample_sr
        index_info = (
            "Using index:%s." % file_index
            if os.path.exists(file_index)
            else "Index not used."
        )
        return "Success.\n %s\nTime:\n npy:%ss, f0:%ss, infer:%ss" % (
            index_info,
            times[0],
            times[1],
            times[2],
        ), (tgt_sr, audio_opt)
    except Exception:
        # Bug fix: the bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; catch only ordinary exceptions and report them.
        info = traceback.format_exc()
        print(info)
        return info, (None, None)
344
+
345
+
346
def vc_multi(
    sid,
    dir_path,
    opt_root,
    paths,
    f0_up_key,
    f0_method,
    file_index,
    file_index2,
    # file_big_npy,
    index_rate,
    filter_radius,
    resample_sr,
    rms_mix_rate,
    protect,
    format1,
    crepe_hop_length,
):
    """Batch conversion: run vc_single over every file in dir_path (or the
    uploaded `paths` list when dir_path is empty), writing results to
    opt_root in format1. Generator — yields the accumulated per-file status
    log as it goes (consumed by gradio's streaming output)."""
    try:
        dir_path = (
            dir_path.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
        )  # strip stray quotes/whitespace/newlines from a pasted path
        opt_root = opt_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
        os.makedirs(opt_root, exist_ok=True)
        try:
            if dir_path != "":
                paths = [os.path.join(dir_path, name) for name in os.listdir(dir_path)]
            else:
                paths = [path.name for path in paths]
        except:
            # Best-effort: fall back to the uploaded file objects.
            traceback.print_exc()
            paths = [path.name for path in paths]
        infos = []
        for path in paths:
            info, opt = vc_single(
                sid,
                path,
                f0_up_key,
                None,
                f0_method,
                file_index,
                # file_big_npy,
                index_rate,
                filter_radius,
                resample_sr,
                rms_mix_rate,
                protect,
                crepe_hop_length
            )
            if "Success" in info:
                try:
                    tgt_sr, audio_opt = opt
                    if format1 in ["wav", "flac"]:
                        # soundfile can write wav/flac directly.
                        sf.write(
                            "%s/%s.%s" % (opt_root, os.path.basename(path), format1),
                            audio_opt,
                            tgt_sr,
                        )
                    else:
                        # Other formats: write a wav, then transcode via ffmpeg.
                        path = "%s/%s.wav" % (opt_root, os.path.basename(path))
                        sf.write(
                            path,
                            audio_opt,
                            tgt_sr,
                        )
                        if os.path.exists(path):
                            os.system(
                                "ffmpeg -i %s -vn %s -q:a 2 -y"
                                % (path, path[:-4] + ".%s" % format1)
                            )
                except:
                    # Keep going; append the failure details to this file's status.
                    info += traceback.format_exc()
            infos.append("%s->%s" % (os.path.basename(path), info))
            yield "\n".join(infos)
        yield "\n".join(infos)
    except:
        yield traceback.format_exc()
423
+
424
# Only one voice model can be active per tab (module-global state).
def get_vc(sid):
    """Load the selected .pth voice model into the module globals
    (net_g, vc, cpt, tgt_sr, n_spk, version); with an empty sid, tear the
    current model down and free GPU memory instead.

    Returns a gradio update dict for the speaker-id slider.
    """
    global n_spk, tgt_sr, net_g, vc, cpt, version
    if sid == "" or sid == []:
        global hubert_model
        if hubert_model != None:  # polling may switch from "model" to "no model"; free everything
            print("clean_empty_cache")
            del net_g, n_spk, vc, hubert_model, tgt_sr  # ,cpt
            hubert_model = net_g = n_spk = vc = hubert_model = tgt_sr = None
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            ### without the rebuild-then-delete below, VRAM is not fully released
            if_f0 = cpt.get("f0", 1)
            version = cpt.get("version", "v1")
            if version == "v1":
                if if_f0 == 1:
                    net_g = SynthesizerTrnMs256NSFsid(
                        *cpt["config"], is_half=config.is_half
                    )
                else:
                    net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
            elif version == "v2":
                if if_f0 == 1:
                    net_g = SynthesizerTrnMs768NSFsid(
                        *cpt["config"], is_half=config.is_half
                    )
                else:
                    net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
            del net_g, cpt
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            cpt = None
        return {"visible": False, "__type__": "update"}
    person = "%s/%s" % (weight_root, sid)
    print("loading %s" % person)
    cpt = torch.load(person, map_location="cpu")
    tgt_sr = cpt["config"][-1]
    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
    if_f0 = cpt.get("f0", 1)
    version = cpt.get("version", "v1")
    # Pick the synthesizer class matching model version and f0 capability.
    if version == "v1":
        if if_f0 == 1:
            net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
        else:
            net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
    elif version == "v2":
        if if_f0 == 1:
            net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
        else:
            net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
    del net_g.enc_q  # inference does not need the posterior encoder
    print(net_g.load_state_dict(cpt["weight"], strict=False))
    net_g.eval().to(config.device)
    if config.is_half:
        net_g = net_g.half()
    else:
        net_g = net_g.float()
    vc = VC(tgt_sr, config)
    n_spk = cpt["config"][-3]
    return {"visible": False, "maximum": n_spk, "__type__": "update"}
484
+
485
+
486
def change_choices():
    """Rescan weights/ and logs/ and return gradio choice updates for the
    model dropdown and the index dropdown."""
    pth_names = [entry for entry in os.listdir(weight_root) if entry.endswith(".pth")]
    idx_paths = [
        "%s/%s" % (root, fname)
        for root, _, files in os.walk(index_root, topdown=False)
        for fname in files
        if fname.endswith(".index") and "trained" not in fname
    ]
    return (
        {"choices": sorted(pth_names), "__type__": "update"},
        {"choices": sorted(idx_paths), "__type__": "update"},
    )
500
+
501
+
502
def clean():
    """Gradio helper: blank out a textbox."""
    return {"__type__": "update", "value": ""}
504
+
505
+
506
# Maps the UI's sample-rate labels to their numeric rates in Hz.
sr_dict = {
    "32k": 32000,
    "40k": 40000,
    "48k": 48000,
}
511
+
512
+
513
def if_done(done, p):
    """Block until subprocess `p` exits, then set done[0] = True.

    `done` is a single-element list used as a mutable completion flag shared
    with the generator that polls the log file. Runs in a worker thread.
    """
    # poll() returns None while the process is still running.
    while p.poll() is None:
        sleep(0.5)
    done[0] = True
520
+
521
+
522
def if_done_multi(done, ps):
    """Block until every subprocess in `ps` has exited, then set
    done[0] = True (same mutable-flag convention as if_done).
    """
    # Keep waiting while any worker is still running (poll() is None).
    while any(p.poll() is None for p in ps):
        sleep(0.5)
    done[0] = True
535
+
536
+
537
def preprocess_dataset(trainset_dir, exp_dir, sr, n_p):
    """Run the dataset preprocessing script as a subprocess, streaming its
    log file back to the UI. Generator — yields the growing log text."""
    sr = sr_dict[sr]
    os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True)
    f = open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "w")
    f.close()
    cmd = (
        config.python_cmd
        + " trainset_preprocess_pipeline_print.py %s %s %s %s/logs/%s "
        % (trainset_dir, sr, n_p, now_dir, exp_dir)
        + str(config.noparallel)
    )
    print(cmd)
    p = Popen(cmd, shell=True)  # , stdin=PIPE, stdout=PIPE,stderr=PIPE,cwd=now_dir
    # gradio only reads Popen output once the process finishes, so a side
    # thread flags completion while we poll the log file on a timer.
    done = [False]
    threading.Thread(
        target=if_done,
        args=(
            done,
            p,
        ),
    ).start()
    while 1:
        with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f:
            yield (f.read())
        sleep(1)
        if done[0] == True:
            break
    # Emit the final, complete log once the worker has exited.
    with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f:
        log = f.read()
    print(log)
    yield log
569
+
570
+ # but2.click(extract_f0,[gpus6,np7,f0method8,if_f0_3,trainset_dir4],[info2])
571
def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, echl):
    """Run pitch (f0) extraction and then HuBERT feature extraction for the
    experiment, streaming the shared log file back to the UI. Generator.

    gpus is a '-'-separated device list; each device gets its own
    extract_feature_print.py worker process.
    """
    gpus = gpus.split("-")
    os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True)
    f = open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "w")
    f.close()
    if if_f0:
        # Phase 1: pitch extraction (skipped for non-f0 models).
        cmd = config.python_cmd + " extract_f0_print.py %s/logs/%s %s %s %s" % (
            now_dir,
            exp_dir,
            n_p,
            f0method,
            echl,
        )
        print(cmd)
        p = Popen(cmd, shell=True, cwd=now_dir)  # , stdin=PIPE, stdout=PIPE,stderr=PIPE
        # gradio cannot stream Popen output incrementally, so poll the log
        # file on a timer while a side thread watches for process exit.
        done = [False]
        threading.Thread(
            target=if_done,
            args=(
                done,
                p,
            ),
        ).start()
        while 1:
            with open(
                "%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r"
            ) as f:
                yield (f.read())
            sleep(1)
            if done[0] == True:
                break
        with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
            log = f.read()
        print(log)
        yield log
    # Phase 2: spawn one feature-extraction worker per listed device.
    """
    n_part=int(sys.argv[1])
    i_part=int(sys.argv[2])
    i_gpu=sys.argv[3]
    exp_dir=sys.argv[4]
    os.environ["CUDA_VISIBLE_DEVICES"]=str(i_gpu)
    """
    leng = len(gpus)
    ps = []
    for idx, n_g in enumerate(gpus):
        cmd = (
            config.python_cmd
            + " extract_feature_print.py %s %s %s %s %s/logs/%s %s"
            % (
                config.device,
                leng,
                idx,
                n_g,
                now_dir,
                exp_dir,
                version19,
            )
        )
        print(cmd)
        p = Popen(
            cmd, shell=True, cwd=now_dir
        )  # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
        ps.append(p)
    # Same log-polling pattern, waiting on all workers this time.
    done = [False]
    threading.Thread(
        target=if_done_multi,
        args=(
            done,
            ps,
        ),
    ).start()
    while 1:
        with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
            yield (f.read())
        sleep(1)
        if done[0] == True:
            break
    with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
        log = f.read()
    print(log)
    yield log
655
+
656
+
657
def change_sr2(sr2, if_f0_3, version19):
    """Resolve the pretrained G/D checkpoint paths for the chosen sample
    rate / f0 mode / model version, warning when a file is missing.

    Returns (generator_path, discriminator_path, visibility_update); a path
    is "" when its checkpoint does not exist on disk.
    """
    suffix = "" if version19 == "v1" else "_v2"
    prefix = "f0" if if_f0_3 else ""
    gen_path = "pretrained%s/%sG%s.pth" % (suffix, prefix, sr2)
    disc_path = "pretrained%s/%sD%s.pth" % (suffix, prefix, sr2)
    have_gen = os.access(gen_path, os.F_OK)
    have_disc = os.access(disc_path, os.F_OK)
    if not have_gen:
        print(gen_path, "not exist, will not use pretrained model")
    if not have_disc:
        print(disc_path, "not exist, will not use pretrained model")
    return (
        gen_path if have_gen else "",
        disc_path if have_disc else "",
        {"visible": True, "__type__": "update"},
    )
671
+
672
def change_version19(sr2, if_f0_3, version19):
    """Re-resolve the pretrained G/D checkpoint paths after the model
    version changes; warns when a file is missing.

    Returns (generator_path, discriminator_path), each "" when the
    corresponding checkpoint does not exist on disk.
    """
    suffix = "" if version19 == "v1" else "_v2"
    prefix = "f0" if if_f0_3 else ""
    gen_path = "pretrained%s/%sG%s.pth" % (suffix, prefix, sr2)
    disc_path = "pretrained%s/%sD%s.pth" % (suffix, prefix, sr2)
    have_gen = os.access(gen_path, os.F_OK)
    have_disc = os.access(disc_path, os.F_OK)
    if not have_gen:
        print(gen_path, "not exist, will not use pretrained model")
    if not have_disc:
        print(disc_path, "not exist, will not use pretrained model")
    return (
        gen_path if have_gen else "",
        disc_path if have_disc else "",
    )
685
+
686
+
687
def change_f0(if_f0_3, sr2, version19):  # f0method8,pretrained_G14,pretrained_D15
    """Toggle f0-related controls and re-resolve pretrained paths when the
    "train with pitch" checkbox changes.

    Returns (f0-method visibility update, generator_path, discriminator_path).
    Note: existence is always checked against the f0G/f0D files, even for
    the non-f0 return paths — preserved from the original behavior.
    """
    suffix = "" if version19 == "v1" else "_v2"
    f0_gen = "pretrained%s/f0G%s.pth" % (suffix, sr2)
    f0_disc = "pretrained%s/f0D%s.pth" % (suffix, sr2)
    have_gen = os.access(f0_gen, os.F_OK)
    have_disc = os.access(f0_disc, os.F_OK)
    if not have_gen:
        print(f0_gen, "not exist, will not use pretrained model")
    if not have_disc:
        print(f0_disc, "not exist, will not use pretrained model")
    if if_f0_3:
        return (
            {"visible": True, "__type__": "update"},
            f0_gen if have_gen else "",
            f0_disc if have_disc else "",
        )
    return (
        {"visible": False, "__type__": "update"},
        ("pretrained%s/G%s.pth" % (suffix, sr2)) if have_gen else "",
        ("pretrained%s/D%s.pth" % (suffix, sr2)) if have_disc else "",
    )
706
+
707
+
708
global log_interval


def set_log_interval(exp_dir, batch_size12):
    """Derive a logging interval from the number of 16 kHz wavs in the
    experiment folder: ceil(n_wavs / batch_size), plus 1 when that exceeds
    1; defaults to 1 when the folder or wav files are absent."""
    interval = 1
    wav_dir = os.path.join(exp_dir, "1_16k_wavs")
    if os.path.isdir(wav_dir):
        wavs = [entry for entry in os.listdir(wav_dir) if entry.endswith(".wav")]
        if wavs:
            interval = math.ceil(len(wavs) / batch_size12)
            if interval > 1:
                interval += 1
    return interval
724
+
725
+ # but3.click(click_train,[exp_dir1,sr2,if_f0_3,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16])
726
def click_train(
    exp_dir1,
    sr2,
    if_f0_3,
    spk_id5,
    save_epoch10,
    total_epoch11,
    batch_size12,
    if_save_latest13,
    pretrained_G14,
    pretrained_D15,
    gpus16,
    if_cache_gpu17,
    if_save_every_weights18,
    version19,
):
    """Build the training filelist for the experiment and launch
    train_nsf_sim_cache_sid_load_pretrain.py as a subprocess, blocking
    until training exits. Returns a status message plus two gradio
    visibility updates for the train/stop buttons.
    """
    CSVutil('csvdb/stop.csv', 'w+', 'formanting', False)  # reset the stop flag
    # Build the filelist.
    exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
    os.makedirs(exp_dir, exist_ok=True)
    gt_wavs_dir = "%s/0_gt_wavs" % (exp_dir)
    feature_dir = (
        "%s/3_feature256" % (exp_dir)
        if version19 == "v1"
        else "%s/3_feature768" % (exp_dir)
    )

    log_interval = set_log_interval(exp_dir, batch_size12)

    if if_f0_3:
        f0_dir = "%s/2a_f0" % (exp_dir)
        f0nsf_dir = "%s/2b-f0nsf" % (exp_dir)
        # Keep only samples that have every required artifact present.
        names = (
            set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)])
            & set([name.split(".")[0] for name in os.listdir(feature_dir)])
            & set([name.split(".")[0] for name in os.listdir(f0_dir)])
            & set([name.split(".")[0] for name in os.listdir(f0nsf_dir)])
        )
    else:
        names = set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) & set(
            [name.split(".")[0] for name in os.listdir(feature_dir)]
        )
    opt = []
    for name in names:
        if if_f0_3:
            opt.append(
                "%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s"
                % (
                    gt_wavs_dir.replace("\\", "\\\\"),
                    name,
                    feature_dir.replace("\\", "\\\\"),
                    name,
                    f0_dir.replace("\\", "\\\\"),
                    name,
                    f0nsf_dir.replace("\\", "\\\\"),
                    name,
                    spk_id5,
                )
            )
        else:
            opt.append(
                "%s/%s.wav|%s/%s.npy|%s"
                % (
                    gt_wavs_dir.replace("\\", "\\\\"),
                    name,
                    feature_dir.replace("\\", "\\\\"),
                    name,
                    spk_id5,
                )
            )
    fea_dim = 256 if version19 == "v1" else 768
    # Pad the filelist with the bundled "mute" sample (added twice).
    if if_f0_3:
        for _ in range(2):
            opt.append(
                "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"
                % (now_dir, sr2, now_dir, fea_dim, now_dir, now_dir, spk_id5)
            )
    else:
        for _ in range(2):
            opt.append(
                "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s"
                % (now_dir, sr2, now_dir, fea_dim, spk_id5)
            )
    shuffle(opt)
    with open("%s/filelist.txt" % exp_dir, "w") as f:
        f.write("\n".join(opt))
    print("write filelist done")
    # No config generation is needed here.
    # cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e mi-test -sr 40k -f0 1 -bs 4 -g 0 -te 10 -se 5 -pg pretrained/f0G40k.pth -pd pretrained/f0D40k.pth -l 1 -c 0"
    print("use gpus:", gpus16)
    if pretrained_G14 == "":
        print("no pretrained Generator")
    if pretrained_D15 == "":
        print("no pretrained Discriminator")
    if gpus16:
        # GPU training: pass the '-g' device list.
        cmd = (
            config.python_cmd
            + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s -li %s"
            % (
                exp_dir1,
                sr2,
                1 if if_f0_3 else 0,
                batch_size12,
                gpus16,
                total_epoch11,
                save_epoch10,
                ("-pg %s" % pretrained_G14) if pretrained_G14 != "" else "",
                ("-pd %s" % pretrained_D15) if pretrained_D15 != "" else "",
                1 if if_save_latest13 == True else 0,
                1 if if_cache_gpu17 == True else 0,
                1 if if_save_every_weights18 == True else 0,
                version19,
                log_interval,
            )
        )
    else:
        # CPU training: same command without '-g'.
        cmd = (
            config.python_cmd
            + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s -li %s"
            % (
                exp_dir1,
                sr2,
                1 if if_f0_3 else 0,
                batch_size12,
                total_epoch11,
                save_epoch10,
                ("-pg %s" % pretrained_G14) if pretrained_G14 != "" else "\b",
                ("-pd %s" % pretrained_D15) if pretrained_D15 != "" else "\b",
                1 if if_save_latest13 == True else 0,
                1 if if_cache_gpu17 == True else 0,
                1 if if_save_every_weights18 == True else 0,
                version19,
                log_interval,
            )
        )
    print(cmd)
    p = Popen(cmd, shell=True, cwd=now_dir)
    global PID
    PID = p.pid  # remembered so the UI's stop button can kill training
    p.wait()
    return ("训练结束, 您可查看控制台训练日志或实验文件夹下的train.log", {"visible": False, "__type__": "update"}, {"visible": True, "__type__": "update"})
867
+
868
+
869
+ # but4.click(train_index, [exp_dir1], info3)
870
def train_index(exp_dir1, version19):
    """Build the faiss retrieval index for a trained model (generator).

    Loads every extracted feature .npy under the experiment's feature
    directory, concatenates and shuffles them, trains an IVF-Flat faiss
    index and writes both a "trained_*" and an "added_*" .index file into
    the experiment log dir. Yields cumulative status text for the Gradio
    textbox after each stage.

    exp_dir1  -- experiment name (subfolder of <now_dir>/logs)
    version19 -- "v1" (256-dim features) or "v2" (768-dim features)
    """
    exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
    os.makedirs(exp_dir, exist_ok=True)
    # Feature dimensionality depends on the model version (v1: 256, v2: 768).
    feature_dir = (
        "%s/3_feature256" % (exp_dir)
        if version19 == "v1"
        else "%s/3_feature768" % (exp_dir)
    )
    # Bail out with a user-facing message when feature extraction has not run.
    if os.path.exists(feature_dir) == False:
        return "请先进行特征提取!"
    listdir_res = list(os.listdir(feature_dir))
    if len(listdir_res) == 0:
        return "请先进行特征提取!"
    # Load all per-utterance feature matrices and stack them row-wise.
    npys = []
    for name in sorted(listdir_res):
        phone = np.load("%s/%s" % (feature_dir, name))
        npys.append(phone)
    big_npy = np.concatenate(npys, 0)
    # Shuffle rows so faiss training sees an unbiased sample.
    big_npy_idx = np.arange(big_npy.shape[0])
    np.random.shuffle(big_npy_idx)
    big_npy = big_npy[big_npy_idx]
    np.save("%s/total_fea.npy" % exp_dir, big_npy)
    # n_ivf = big_npy.shape[0] // 39
    # Number of IVF clusters: 16*sqrt(N) capped at N//39 (faiss needs ~39
    # training points per centroid).
    n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)
    infos = []
    infos.append("%s,%s" % (big_npy.shape, n_ivf))
    yield "\n".join(infos)
    index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf)
    # index = faiss.index_factory(256if version19=="v1"else 768, "IVF%s,PQ128x4fs,RFlat"%n_ivf)
    infos.append("training")
    yield "\n".join(infos)
    index_ivf = faiss.extract_index_ivf(index)  #
    index_ivf.nprobe = 1
    index.train(big_npy)
    # Persist the trained (but still empty) index before adding vectors.
    faiss.write_index(
        index,
        "%s/trained_IVF%s_Flat_nprobe_%s_%s_%s.index"
        % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
    )
    # faiss.write_index(index, '%s/trained_IVF%s_Flat_FastScan_%s.index'%(exp_dir,n_ivf,version19))
    infos.append("adding")
    yield "\n".join(infos)
    # Add vectors in batches to bound peak memory usage.
    batch_size_add = 8192
    for i in range(0, big_npy.shape[0], batch_size_add):
        index.add(big_npy[i : i + batch_size_add])
    faiss.write_index(
        index,
        "%s/added_IVF%s_Flat_nprobe_%s_%s_%s.index"
        % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
    )
    infos.append(
        "成功构建索引,added_IVF%s_Flat_nprobe_%s_%s_%s.index"
        % (n_ivf, index_ivf.nprobe, exp_dir1, version19)
    )
    # faiss.write_index(index, '%s/added_IVF%s_Flat_FastScan_%s.index'%(exp_dir,n_ivf,version19))
    # infos.append("成功构建索引,added_IVF%s_Flat_FastScan_%s.index"%(n_ivf,version19))
    yield "\n".join(infos)
927
+
928
+
929
+ # but5.click(train1key, [exp_dir1, sr2, if_f0_3, trainset_dir4, spk_id5, gpus6, np7, f0method8, save_epoch10, total_epoch11, batch_size12, if_save_latest13, pretrained_G14, pretrained_D15, gpus16, if_cache_gpu17], info3)
930
def train1key(
    exp_dir1,
    sr2,
    if_f0_3,
    trainset_dir4,
    spk_id5,
    np7,
    f0method8,
    save_epoch10,
    total_epoch11,
    batch_size12,
    if_save_latest13,
    pretrained_G14,
    pretrained_D15,
    gpus16,
    if_cache_gpu17,
    if_save_every_weights18,
    version19,
    echl
):
    """One-click training pipeline (generator).

    Runs, in order: dataset preprocessing, optional f0 extraction, feature
    extraction (one subprocess per GPU), model training, and faiss index
    construction. Yields the cumulative status log after each step so the
    Gradio textbox updates live.

    exp_dir1      -- experiment name under <now_dir>/logs
    sr2           -- sample-rate key into sr_dict (e.g. "40k")
    if_f0_3       -- whether the model uses pitch (f0) conditioning
    trainset_dir4 -- path to the raw training audio folder
    spk_id5       -- speaker id written into the filelist
    np7           -- number of parallel CPU processes for extraction
    f0method8     -- pitch extraction algorithm name
    gpus16        -- "-"-separated GPU indices, e.g. "0-1"; falsy -> CPU path
    echl          -- crepe hop length forwarded to extract_f0_print.py
    """
    infos = []

    def get_info_str(strr):
        # Accumulate status lines and return the full log so far.
        infos.append(strr)
        return "\n".join(infos)

    model_log_dir = "%s/logs/%s" % (now_dir, exp_dir1)
    preprocess_log_path = "%s/preprocess.log" % model_log_dir
    extract_f0_feature_log_path = "%s/extract_f0_feature.log" % model_log_dir
    gt_wavs_dir = "%s/0_gt_wavs" % model_log_dir
    # Feature dim depends on the model version (v1: 256, v2: 768).
    feature_dir = (
        "%s/3_feature256" % model_log_dir
        if version19 == "v1"
        else "%s/3_feature768" % model_log_dir
    )

    os.makedirs(model_log_dir, exist_ok=True)
    ######### step 1: preprocess the dataset
    open(preprocess_log_path, "w").close()
    cmd = (
        config.python_cmd
        + " trainset_preprocess_pipeline_print.py %s %s %s %s "
        % (trainset_dir4, sr_dict[sr2], np7, model_log_dir)
        + str(config.noparallel)
    )
    yield get_info_str(i18n("step1:正在处理数据"))
    yield get_info_str(cmd)
    p = Popen(cmd, shell=True)
    p.wait()
    with open(preprocess_log_path, "r") as f:
        print(f.read())
    ######### step 2a: extract pitch (f0)
    # NOTE(review): this handle is never closed explicitly; it only truncates
    # the log file. Consider open(...).close() as done for preprocess above.
    open(extract_f0_feature_log_path, "w")
    if if_f0_3:
        yield get_info_str("step2a:正在提取音高")
        cmd = config.python_cmd + " extract_f0_print.py %s %s %s %s" % (
            model_log_dir,
            np7,
            f0method8,
            echl
        )
        yield get_info_str(cmd)
        p = Popen(cmd, shell=True, cwd=now_dir)
        p.wait()
        with open(extract_f0_feature_log_path, "r") as f:
            print(f.read())
    else:
        yield get_info_str(i18n("step2a:无需提取音高"))
    ####### step 2b: extract HuBERT features (one worker per GPU)
    yield get_info_str(i18n("step2b:正在提取特征"))
    gpus = gpus16.split("-")
    leng = len(gpus)
    ps = []
    for idx, n_g in enumerate(gpus):
        cmd = config.python_cmd + " extract_feature_print.py %s %s %s %s %s %s" % (
            config.device,
            leng,
            idx,
            n_g,
            model_log_dir,
            version19,
        )
        yield get_info_str(cmd)
        p = Popen(
            cmd, shell=True, cwd=now_dir
        )  # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
        ps.append(p)
    # Wait for all extraction workers before reading the shared log.
    for p in ps:
        p.wait()
    with open(extract_f0_feature_log_path, "r") as f:
        print(f.read())
    ####### step 3a: train the model
    yield get_info_str(i18n("step3a:正在训练模型"))
    # Build filelist.txt: only keep utterances present in every required dir.
    if if_f0_3:
        f0_dir = "%s/2a_f0" % model_log_dir
        f0nsf_dir = "%s/2b-f0nsf" % model_log_dir
        names = (
            set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)])
            & set([name.split(".")[0] for name in os.listdir(feature_dir)])
            & set([name.split(".")[0] for name in os.listdir(f0_dir)])
            & set([name.split(".")[0] for name in os.listdir(f0nsf_dir)])
        )
    else:
        names = set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) & set(
            [name.split(".")[0] for name in os.listdir(feature_dir)]
        )
    opt = []
    for name in names:
        # Each filelist line: wav|feature|(f0|f0nsf|)speaker_id, with
        # backslashes escaped for Windows paths.
        if if_f0_3:
            opt.append(
                "%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s"
                % (
                    gt_wavs_dir.replace("\\", "\\\\"),
                    name,
                    feature_dir.replace("\\", "\\\\"),
                    name,
                    f0_dir.replace("\\", "\\\\"),
                    name,
                    f0nsf_dir.replace("\\", "\\\\"),
                    name,
                    spk_id5,
                )
            )
        else:
            opt.append(
                "%s/%s.wav|%s/%s.npy|%s"
                % (
                    gt_wavs_dir.replace("\\", "\\\\"),
                    name,
                    feature_dir.replace("\\", "\\\\"),
                    name,
                    spk_id5,
                )
            )
    fea_dim = 256 if version19 == "v1" else 768
    # Append two "mute" entries so the model learns silence.
    if if_f0_3:
        for _ in range(2):
            opt.append(
                "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"
                % (now_dir, sr2, now_dir, fea_dim, now_dir, now_dir, spk_id5)
            )
    else:
        for _ in range(2):
            opt.append(
                "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s"
                % (now_dir, sr2, now_dir, fea_dim, spk_id5)
            )
    shuffle(opt)
    with open("%s/filelist.txt" % model_log_dir, "w") as f:
        f.write("\n".join(opt))
    yield get_info_str("write filelist done")
    # GPU path passes -g; CPU path omits it.
    if gpus16:
        cmd = (
            config.python_cmd
            +" train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s"
            % (
                exp_dir1,
                sr2,
                1 if if_f0_3 else 0,
                batch_size12,
                gpus16,
                total_epoch11,
                save_epoch10,
                ("-pg %s" % pretrained_G14) if pretrained_G14 != "" else "",
                ("-pd %s" % pretrained_D15) if pretrained_D15 != "" else "",
                1 if if_save_latest13 == True else 0,
                1 if if_cache_gpu17 == True else 0,
                1 if if_save_every_weights18 == True else 0,
                version19,
            )
        )
    else:
        cmd = (
            config.python_cmd
            + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s"
            % (
                exp_dir1,
                sr2,
                1 if if_f0_3 else 0,
                batch_size12,
                total_epoch11,
                save_epoch10,
                ("-pg %s" % pretrained_G14) if pretrained_G14 != "" else "",
                ("-pd %s" % pretrained_D15) if pretrained_D15 != "" else "",
                1 if if_save_latest13 == True else 0,
                1 if if_cache_gpu17 == True else 0,
                1 if if_save_every_weights18 == True else 0,
                version19,
            )
        )
    yield get_info_str(cmd)
    p = Popen(cmd, shell=True, cwd=now_dir)
    p.wait()
    yield get_info_str(i18n("训练结束, 您可查看控制台训练日志或实验文件夹下的train.log"))
    ####### step 3b: build the retrieval index (same flow as train_index)
    npys = []
    listdir_res = list(os.listdir(feature_dir))
    for name in sorted(listdir_res):
        phone = np.load("%s/%s" % (feature_dir, name))
        npys.append(phone)
    big_npy = np.concatenate(npys, 0)

    # Shuffle rows so faiss training sees an unbiased sample.
    big_npy_idx = np.arange(big_npy.shape[0])
    np.random.shuffle(big_npy_idx)
    big_npy = big_npy[big_npy_idx]
    np.save("%s/total_fea.npy" % model_log_dir, big_npy)

    # n_ivf = big_npy.shape[0] // 39
    # 16*sqrt(N) clusters capped at N//39 (faiss wants ~39 points/centroid).
    n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)
    yield get_info_str("%s,%s" % (big_npy.shape, n_ivf))
    index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf)
    yield get_info_str("training index")
    index_ivf = faiss.extract_index_ivf(index)  #
    index_ivf.nprobe = 1
    index.train(big_npy)
    faiss.write_index(
        index,
        "%s/trained_IVF%s_Flat_nprobe_%s_%s_%s.index"
        % (model_log_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
    )
    yield get_info_str("adding index")
    # Add vectors in batches to bound peak memory usage.
    batch_size_add = 8192
    for i in range(0, big_npy.shape[0], batch_size_add):
        index.add(big_npy[i : i + batch_size_add])
    faiss.write_index(
        index,
        "%s/added_IVF%s_Flat_nprobe_%s_%s_%s.index"
        % (model_log_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
    )
    yield get_info_str(
        "成功构建索引, added_IVF%s_Flat_nprobe_%s_%s_%s.index"
        % (n_ivf, index_ivf.nprobe, exp_dir1, version19)
    )
    yield get_info_str(i18n("全流程结束!"))
1165
+
1166
+
1167
def whethercrepeornah(radio):
    """Gradio visibility update: show the crepe hop-length slider only when a
    mangio-crepe pitch extractor is selected."""
    is_mangio_crepe = radio in ('mangio-crepe', 'mangio-crepe-tiny')
    return {"visible": is_mangio_crepe, "__type__": "update"}
1170
+
1171
+ # ckpt_path2.change(change_info_,[ckpt_path2],[sr__,if_f0__])
1172
# ckpt_path2.change(change_info_,[ckpt_path2],[sr__,if_f0__])
def change_info_(ckpt_path):
    """Read sample rate / f0 flag / version from the train.log next to a checkpoint.

    ckpt_path -- path to a .pth checkpoint; its sibling "train.log" is parsed.
    Returns (sample_rate, str(if_f0), version) on success, or three Gradio
    no-op update dicts when the log is missing or unparsable.
    """
    import ast  # local import: only needed here, keeps top-of-file untouched

    log_path = ckpt_path.replace(os.path.basename(ckpt_path), "train.log")
    if not os.path.exists(log_path):
        return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
    try:
        with open(log_path, "r") as f:
            # First line ends in a tab-separated dict repr written by training.
            # Fix: parse with ast.literal_eval instead of eval — the log is
            # external text, so arbitrary-code evaluation was a security hole.
            info = ast.literal_eval(f.read().strip("\n").split("\n")[0].split("\t")[-1])
        sr, f0 = info["sample_rate"], info["if_f0"]
        version = "v2" if ("version" in info and info["version"] == "v2") else "v1"
        return sr, str(f0), version
    except:
        traceback.print_exc()
        return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
1189
+
1190
+
1191
+ from lib.infer_pack.models_onnx import SynthesizerTrnMsNSFsidM
1192
+
1193
+
1194
def export_onnx(ModelPath, ExportedPath, MoeVS=True):
    """Export an RVC checkpoint to ONNX via dummy-input tracing.

    ModelPath    -- path of the source .pth checkpoint (loaded on CPU)
    ExportedPath -- destination .onnx file path
    MoeVS        -- accepted for caller compatibility; not used in this body
    Returns the string "Finished" on completion.
    """
    cpt = torch.load(ModelPath, map_location="cpu")
    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
    hidden_channels = 256 if cpt.get("version","v1")=="v1"else 768#cpt["config"][-2] # hidden_channels; 768 for v2 (768-dim feature) models

    # Dummy inputs for tracing; sequence length 200 is arbitrary (dynamic axes below).
    test_phone = torch.rand(1, 200, hidden_channels)  # hidden unit
    test_phone_lengths = torch.tensor([200]).long()  # hidden-unit length (apparently unused downstream)
    test_pitch = torch.randint(size=(1, 200), low=5, high=255)  # coarse pitch
    test_pitchf = torch.rand(1, 200)  # NSF pitch (Hz)
    test_ds = torch.LongTensor([0])  # speaker id
    test_rnd = torch.rand(1, 192, 200)  # noise (randomness source)

    device = "cpu"  # export-time device (does not constrain inference device)


    net_g = SynthesizerTrnMsNSFsidM(
        *cpt["config"], is_half=False,version=cpt.get("version","v1")
    )  # fp32 export (fp16 in C++ would require manual memory re-layout, so not used yet)
    net_g.load_state_dict(cpt["weight"], strict=False)
    input_names = ["phone", "phone_lengths", "pitch", "pitchf", "ds", "rnd"]
    output_names = [
        "audio",
    ]
    # net_g.construct_spkmixmap(n_speaker)  # multi-speaker mix-track export
    torch.onnx.export(
        net_g,
        (
            test_phone.to(device),
            test_phone_lengths.to(device),
            test_pitch.to(device),
            test_pitchf.to(device),
            test_ds.to(device),
            test_rnd.to(device),
        ),
        ExportedPath,
        # Sequence-length axes are dynamic so any utterance length works.
        dynamic_axes={
            "phone": [1],
            "pitch": [1],
            "pitchf": [1],
            "rnd": [2],
        },
        do_constant_folding=False,
        opset_version=16,
        verbose=False,
        input_names=input_names,
        output_names=output_names,
    )
    return "Finished"
1242
+
1243
+ #region RVC WebUI App
1244
+
1245
def get_presets():
    """Load ../inference-presets.json and return the list of preset names."""
    with open('../inference-presets.json', 'r') as file:
        data = json.load(file)
    return [preset['name'] for preset in data['presets']]
1254
+
1255
def change_choices2():
    """Rescan ./audios and return Gradio update payloads: new sorted choices
    for the audio dropdown plus a generic update for the second output."""
    playable = ('.wav', '.mp3', '.ogg', '.flac', '.m4a', '.aac', '.mp4')
    found = [
        os.path.join('./audios', fname).replace('\\', '/')
        for fname in os.listdir("./audios")
        if fname.endswith(playable)
    ]
    return {"choices": sorted(found), "__type__": "update"}, {"__type__": "update"}
1261
+
1262
# Initial scan of ./audios at startup; populates the inference audio dropdown.
audio_files = []
for _audio_name in os.listdir("./audios"):
    if _audio_name.endswith(('.wav', '.mp3', '.ogg', '.flac', '.m4a', '.aac', '.mp4')):
        audio_files.append(os.path.join('./audios', _audio_name).replace('\\', '/'))
1266
+
1267
def get_index():
    """Best-effort default .index path for the alphabetically first model.

    Returns the first *.index file found under ./logs/<model>, or '' when
    no model is installed or no index file exists.
    """
    if check_for_name() != '':
        chosen_model = sorted(names)[0].split(".")[0]
        logs_path = "./logs/" + chosen_model
        if os.path.exists(logs_path):
            for file in os.listdir(logs_path):
                if file.endswith(".index"):
                    return os.path.join(logs_path, file)
        # Fix: previously fell off the end (returning None) when logs_path was
        # missing; always return '' so the Gradio dropdown gets a string value.
        return ''
    else:
        return ''
1278
+
1279
def get_indexes():
    """Walk ./logs/ recursively and collect every *.index file.

    Returns the list of paths, or '' when none were found (the empty string
    is what the dropdown expects for "no choices")."""
    found = [
        os.path.join(dirpath, fname)
        for dirpath, _dirs, fnames in os.walk("./logs/")
        for fname in fnames
        if fname.endswith(".index")
    ]
    return found if found else ''
1289
+
1290
def get_name():
    """Default audio selection: alphabetically first scanned audio file, or ''
    when the ./audios scan found nothing."""
    return sorted(audio_files)[0] if len(audio_files) > 0 else ''
1295
+
1296
def save_to_wav(record_button):
    """Move a freshly recorded temp file into ./audios under a timestamped
    .wav name. Returns the new path, or None when nothing was recorded."""
    if record_button is None:
        return None
    stamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    destination = './audios/' + stamp + '.wav'
    shutil.move(record_button, destination)
    return destination
1305
+
1306
def save_to_wav2(dropbox):
    """Move a file dropped into the Gradio File widget into ./audios and
    return its new path (dropbox is the uploaded file object; .name is its
    temp path)."""
    source = dropbox.name
    shutil.move(source, './audios')
    return os.path.join('./audios', os.path.basename(source))
1310
+
1311
def match_index(sid0):
    """Auto-select the .index file that matches a chosen model.

    sid0 -- model file name such as "MyModel.pth"; the folder ./logs/MyModel
            is searched for the first *.index file.
    Returns the index path, or '' when no folder or no index file exists.
    """
    folder = sid0.split(".")[0]
    parent_dir = "./logs/" + folder
    if os.path.exists(parent_dir):
        for filename in os.listdir(parent_dir):
            if filename.endswith(".index"):
                index_path = os.path.join(parent_dir, filename)
                return index_path
        # Fix: previously returned None when the folder existed but held no
        # .index file; always return '' so the dropdown gets a string value.
        return ''
    else:
        return ''
1321
+
1322
def check_for_name():
    """Name of the alphabetically first installed model, or '' when no models
    have been discovered (reads the module-level `names` collection)."""
    return sorted(names)[0] if len(names) > 0 else ''
1327
+
1328
def download_from_url(url, model):
    """Download a zipped RVC model and install its contents.

    url   -- Google Drive (gdown), mega.nz (Mega) or plain HTTP (wget) link
    model -- name to install under; the .pth goes to ./weights/<model>.pth
             and any .index file is copied into ./logs/<model>/
    Returns a short human-readable status string for the UI.
    """
    if url == '':
        return "URL cannot be left empty."
    if model == '':
        return "You need to name your model. For example: My-Model"
    url = url.strip()
    # Start each download from clean scratch directories.
    zip_dirs = ["zips", "unzips"]
    for directory in zip_dirs:
        if os.path.exists(directory):
            shutil.rmtree(directory)
    os.makedirs("zips", exist_ok=True)
    os.makedirs("unzips", exist_ok=True)
    zipfile = model + '.zip'
    zipfile_path = './zips/' + zipfile
    try:
        if "drive.google.com" in url:
            subprocess.run(["gdown", url, "--fuzzy", "-O", zipfile_path])
        elif "mega.nz" in url:
            m = Mega()
            m.download_url(url, './zips')
        else:
            subprocess.run(["wget", url, "-O", zipfile_path])
        for filename in os.listdir("./zips"):
            if filename.endswith(".zip"):
                zipfile_path = os.path.join("./zips/", filename)
                shutil.unpack_archive(zipfile_path, "./unzips", 'zip')
            else:
                return "No zipfile found."
        for root, dirs, files in os.walk('./unzips'):
            for file in files:
                file_path = os.path.join(root, file)
                if file.endswith(".index"):
                    # Fix: os.mkdir raised FileExistsError when the archive
                    # contained more than one .index file or the model folder
                    # already existed, aborting the whole install.
                    os.makedirs(f'./logs/{model}', exist_ok=True)
                    shutil.copy2(file_path, f'./logs/{model}')
                elif "G_" not in file and "D_" not in file and file.endswith(".pth"):
                    shutil.copy(file_path, f'./weights/{model}.pth')
        shutil.rmtree("zips")
        shutil.rmtree("unzips")
        return "Success."
    except:
        # Surface the real cause in the console; the UI still gets a short
        # generic message as before.
        traceback.print_exc()
        return "There's been an error."
1369
def success_message(face):
    """Status text after a Wav2Lip character upload; the second value resets
    the preset face dropdown to 'None'."""
    status = f'{face.name} has been uploaded.'
    return status, 'None'
1371
def mouth(size, face, voice, faces):
    """Run Wav2Lip lip-sync over a face video and a voice track (Colab paths).

    size  -- 'Half' downsamples the video by 2, anything else keeps full res
    face  -- uploaded file object (used when faces == 'None')
    voice -- path of the audio to lip-sync to
    faces -- 'None' or one of the preset characters
    Returns (result video path, status message).

    NOTE(review): if `faces` is a value other than 'None', 'Ben Shapiro' or
    'Andrew Tate', `character` is never assigned and the command f-string
    below raises UnboundLocalError.
    """
    if size == 'Half':
        size = 2
    else:
        size = 1
    if faces == 'None':
        character = face.name
    else:
        if faces == 'Ben Shapiro':
            character = '/content/wav2lip-HD/inputs/ben-shapiro-10.mp4'
        elif faces == 'Andrew Tate':
            character = '/content/wav2lip-HD/inputs/tate-7.mp4'
    # Paths below are hard-coded for the Colab environment this GUI targets.
    command = "python inference.py " \
                "--checkpoint_path checkpoints/wav2lip.pth " \
                f"--face {character} " \
                f"--audio {voice} " \
                "--pads 0 20 0 0 " \
                "--outfile /content/wav2lip-HD/outputs/result.mp4 " \
                "--fps 24 " \
                f"--resize_factor {size}"
    process = subprocess.Popen(command, shell=True, cwd='/content/wav2lip-HD/Wav2Lip-master')
    # No pipes were requested, so communicate() only waits and yields (None, None).
    stdout, stderr = process.communicate()
    return '/content/wav2lip-HD/outputs/result.mp4', 'Animation completed.'
1394
# ElevenLabs voice catalogue offered in the TTS accordion; the two lists are
# parallel (display name -> API voice id), zipped into the lookup used by
# elevenTTS().
eleven_voices = [
    'Adam', 'Antoni', 'Josh', 'Arnold', 'Sam',
    'Bella', 'Rachel', 'Domi', 'Elli',
]
eleven_voices_ids = [
    'pNInz6obpgDQGcFmaJgB', 'ErXwobaYiN019PkySvjV', 'TxGEqnHWrfWFTfGW9XjX',
    'VR6AewLTigWG4xSOukaG', 'yoZ06aMxZJJ28mfd3POQ', 'EXAVITQu4vr4xnSDxMaL',
    '21m00Tcm4TlvDq8ikWAM', 'AZnzlk1XvdvUeBnXmlld', 'MF3mGyEYCl7XYWbV9V6O',
]
chosen_voice = dict(zip(eleven_voices, eleven_voices_ids))
1397
+
1398
def stoptraining(mim):
    """Stop-button handler: ask the running training subprocess to terminate.

    mim -- 1 when a stop was actually requested (passed through from Gradio).
    Returns visibility updates swapping the Stop button back for Train.
    """
    if int(mim) == 1:
        try:
            # Write a stop flag to csvdb/stop.csv — presumably polled by the
            # training script (TODO confirm against CSVutil/train loop).
            CSVutil('csvdb/stop.csv', 'w+', 'stop', 'True')
            # PID is the module global recorded when training was launched.
            os.kill(PID, signal.SIGTERM)
        except Exception as e:
            # Best effort: the process may already have exited.
            print(f"Couldn't click due to {e}")
    return (
        {"visible": False, "__type__": "update"},
        {"visible": True, "__type__": "update"},
    )
1409
+
1410
+
1411
def elevenTTS(xiapi, text, id, lang):
    """Synthesize speech: ElevenLabs when an API key and voice are given,
    otherwise Google TTS (gTTS).

    xiapi -- ElevenLabs API key; '' falls back to gTTS
    text  -- text to speak
    id    -- ElevenLabs voice display name (key of chosen_voice)
    lang  -- language code; 'en' picks the monolingual ElevenLabs model
    Returns (path, path): the saved audio path, duplicated for the recorder
    widget and the audio dropdown outputs.
    """
    if xiapi!= '' and id !='':
        choice = chosen_voice[id]
        CHUNK_SIZE = 1024
        url = f"https://api.elevenlabs.io/v1/text-to-speech/{choice}"
        headers = {
            "Accept": "audio/mpeg",
            "Content-Type": "application/json",
            "xi-api-key": xiapi
        }
        if lang == 'en':
            data = {
                "text": text,
                "model_id": "eleven_monolingual_v1",
                "voice_settings": {
                    "stability": 0.5,
                    "similarity_boost": 0.5
                }
            }
        else:
            data = {
                "text": text,
                "model_id": "eleven_multilingual_v1",
                "voice_settings": {
                    "stability": 0.5,
                    "similarity_boost": 0.5
                }
            }

        # NOTE(review): the HTTP status is not checked; a failed request still
        # streams the (error) body into temp_eleven.mp3.
        response = requests.post(url, json=data, headers=headers)
        with open('./temp_eleven.mp3', 'wb') as f:
            for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
                if chunk:
                    f.write(chunk)
        # save_to_wav moves the temp file into ./audios with a timestamped name.
        aud_path = save_to_wav('./temp_eleven.mp3')
        return aud_path, aud_path
    else:
        tts = gTTS(text, lang=lang)
        tts.save('./temp_gTTS.mp3')
        aud_path = save_to_wav('./temp_gTTS.mp3')
        return aud_path, aud_path
1452
+
1453
def upload_to_dataset(files, dir):
    """Copy uploaded audio files into a dataset folder.

    files -- Gradio upload objects (each exposes .name, its temp path)
    dir   -- destination folder; '' defaults to ./dataset (created if absent)
    Returns a status string with the number of files copied.
    """
    if dir == '':
        dir = './dataset'
    if not os.path.exists(dir):
        os.makedirs(dir)
    copied = 0
    for uploaded in files:
        shutil.copy2(uploaded.name, dir)
        copied += 1
    return f' {copied} files uploaded to {dir}.'
1464
+
1465
def zip_downloader(model):
    """Collect the files a user needs to download a trained voice.

    model -- voice name; looks for ./weights/<model>.pth and, under
             ./logs/<model>, the last .index file whose name contains 'added'.
    Returns (files-or-path, status message) for the Gradio outputs.
    """
    weights_path = f'./weights/{model}.pth'
    if not os.path.exists(weights_path):
        return {"__type__": "update"}, f'Make sure the Voice Name is correct. I could not find {model}.pth'
    log_file = None
    for file in os.listdir(f'./logs/{model}'):
        if file.endswith('.index') and 'added' in file:
            log_file = file
    if log_file is not None:
        return [weights_path, f'./logs/{model}/{log_file}'], "Done"
    return weights_path, "Could not find Index file."
1477
+
1478
+ with gr.Blocks(theme=gr.themes.Base(), title='RVC EASY GUI 🔊') as app:
1479
+ with gr.Tabs():
1480
+ with gr.TabItem("Inference"):
1481
+ gr.HTML("<h1> RVC EASY GUI </h1>")
1482
+ gr.HTML("<h10> Colab version made by Clebersla </h10>")
1483
+ gr.HTML("<h10> Easy GUI coded by Rejekt's RVC GUI coded by Tiger14n </h10>")
1484
+
1485
+ # Inference Preset Row
1486
+ # with gr.Row():
1487
+ # mangio_preset = gr.Dropdown(label="Inference Preset", choices=sorted(get_presets()))
1488
+ # mangio_preset_name_save = gr.Textbox(
1489
+ # label="Your preset name"
1490
+ # )
1491
+ # mangio_preset_save_btn = gr.Button('Save Preset', variant="primary")
1492
+
1493
+ # Other RVC stuff
1494
+ with gr.Row():
1495
+ sid0 = gr.Dropdown(label="1.Choose your Model.", choices=sorted(names), value=check_for_name())
1496
+ refresh_button = gr.Button("Refresh", variant="primary")
1497
+ if check_for_name() != '':
1498
+ get_vc(sorted(names)[0])
1499
+ vc_transform0 = gr.Number(label="Optional: You can change the pitch here or leave it at 0.", value=0)
1500
+ #clean_button = gr.Button(i18n("卸载音色省显存"), variant="primary")
1501
+ spk_item = gr.Slider(
1502
+ minimum=0,
1503
+ maximum=2333,
1504
+ step=1,
1505
+ label=i18n("请选择说话人id"),
1506
+ value=0,
1507
+ visible=False,
1508
+ interactive=True,
1509
+ )
1510
+ #clean_button.click(fn=clean, inputs=[], outputs=[sid0])
1511
+ sid0.change(
1512
+ fn=get_vc,
1513
+ inputs=[sid0],
1514
+ outputs=[spk_item],
1515
+ )
1516
+ but0 = gr.Button("Convert", variant="primary")
1517
+ with gr.Row():
1518
+ with gr.Column():
1519
+ with gr.Row():
1520
+ dropbox = gr.File(label="Drop your audio here & hit the Reload button.")
1521
+ with gr.Row():
1522
+ record_button=gr.Audio(source="microphone", label="OR Record audio.", type="filepath")
1523
+ with gr.Row():
1524
+ input_audio0 = gr.Dropdown(
1525
+ label="2.Choose your audio.",
1526
+ value="./audios/someguy.mp3",
1527
+ choices=audio_files
1528
+ )
1529
+ dropbox.upload(fn=save_to_wav2, inputs=[dropbox], outputs=[input_audio0])
1530
+ dropbox.upload(fn=change_choices2, inputs=[], outputs=[input_audio0])
1531
+ refresh_button2 = gr.Button("Refresh", variant="primary", size='sm')
1532
+ record_button.change(fn=save_to_wav, inputs=[record_button], outputs=[input_audio0])
1533
+ record_button.change(fn=change_choices2, inputs=[], outputs=[input_audio0])
1534
+ with gr.Row():
1535
+ with gr.Accordion('Text To Speech', open=False):
1536
+ with gr.Column():
1537
+ lang = gr.Radio(label='Chinese & Japanese do not work with ElevenLabs currently.',choices=['en','es','fr','pt','zh-CN','de','hi','ja'], value='en')
1538
+ api_box = gr.Textbox(label="Enter your API Key for ElevenLabs, or leave empty to use GoogleTTS", value='')
1539
+ elevenid=gr.Dropdown(label="Voice:", choices=eleven_voices)
1540
+ with gr.Column():
1541
+ tfs = gr.Textbox(label="Input your Text", interactive=True, value="This is a test.")
1542
+ tts_button = gr.Button(value="Speak")
1543
+ tts_button.click(fn=elevenTTS, inputs=[api_box,tfs, elevenid, lang], outputs=[record_button, input_audio0])
1544
+ with gr.Row():
1545
+ with gr.Accordion('Wav2Lip', open=False):
1546
+ with gr.Row():
1547
+ size = gr.Radio(label='Resolution:',choices=['Half','Full'])
1548
+ face = gr.UploadButton("Upload A Character",type='file')
1549
+ faces = gr.Dropdown(label="OR Choose one:", choices=['None','Ben Shapiro','Andrew Tate'])
1550
+ with gr.Row():
1551
+ preview = gr.Textbox(label="Status:",interactive=False)
1552
+ face.upload(fn=success_message,inputs=[face], outputs=[preview, faces])
1553
+ with gr.Row():
1554
+ animation = gr.Video(type='filepath')
1555
+ refresh_button2.click(fn=change_choices2, inputs=[], outputs=[input_audio0, animation])
1556
+ with gr.Row():
1557
+ animate_button = gr.Button('Animate')
1558
+
1559
+ with gr.Column():
1560
+ with gr.Accordion("Index Settings", open=False):
1561
+ file_index1 = gr.Dropdown(
1562
+ label="3. Path to your added.index file (if it didn't automatically find it.)",
1563
+ choices=get_indexes(),
1564
+ value=get_index(),
1565
+ interactive=True,
1566
+ )
1567
+ sid0.change(fn=match_index, inputs=[sid0],outputs=[file_index1])
1568
+ refresh_button.click(
1569
+ fn=change_choices, inputs=[], outputs=[sid0, file_index1]
1570
+ )
1571
+ # file_big_npy1 = gr.Textbox(
1572
+ # label=i18n("特征文件路径"),
1573
+ # value="E:\\codes\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy",
1574
+ # interactive=True,
1575
+ # )
1576
+ index_rate1 = gr.Slider(
1577
+ minimum=0,
1578
+ maximum=1,
1579
+ label=i18n("检索特征占比"),
1580
+ value=0.66,
1581
+ interactive=True,
1582
+ )
1583
+ vc_output2 = gr.Audio(
1584
+ label="Output Audio (Click on the Three Dots in the Right Corner to Download)",
1585
+ type='filepath',
1586
+ interactive=False,
1587
+ )
1588
+ animate_button.click(fn=mouth, inputs=[size, face, vc_output2, faces], outputs=[animation, preview])
1589
+ with gr.Accordion("Advanced Settings", open=False):
1590
+ f0method0 = gr.Radio(
1591
+ label="Optional: Change the Pitch Extraction Algorithm.\nExtraction methods are sorted from 'worst quality' to 'best quality'.\nmangio-crepe may or may not be better than rmvpe in cases where 'smoothness' is more important, but rmvpe is the best overall.",
1592
+ choices=["pm", "dio", "crepe-tiny", "mangio-crepe-tiny", "crepe", "harvest", "mangio-crepe", "rmvpe"], # Fork Feature. Add Crepe-Tiny
1593
+ value="rmvpe",
1594
+ interactive=True,
1595
+ )
1596
+
1597
+ crepe_hop_length = gr.Slider(
1598
+ minimum=1,
1599
+ maximum=512,
1600
+ step=1,
1601
+ label="Mangio-Crepe Hop Length. Higher numbers will reduce the chance of extreme pitch changes but lower numbers will increase accuracy. 64-192 is a good range to experiment with.",
1602
+ value=120,
1603
+ interactive=True,
1604
+ visible=False,
1605
+ )
1606
+ f0method0.change(fn=whethercrepeornah, inputs=[f0method0], outputs=[crepe_hop_length])
1607
+ filter_radius0 = gr.Slider(
1608
+ minimum=0,
1609
+ maximum=7,
1610
+ label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"),
1611
+ value=3,
1612
+ step=1,
1613
+ interactive=True,
1614
+ )
1615
+ resample_sr0 = gr.Slider(
1616
+ minimum=0,
1617
+ maximum=48000,
1618
+ label=i18n("后处理重采样至最终采样率,0为不进行重采样"),
1619
+ value=0,
1620
+ step=1,
1621
+ interactive=True,
1622
+ visible=False
1623
+ )
1624
+ rms_mix_rate0 = gr.Slider(
1625
+ minimum=0,
1626
+ maximum=1,
1627
+ label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"),
1628
+ value=0.21,
1629
+ interactive=True,
1630
+ )
1631
+ protect0 = gr.Slider(
1632
+ minimum=0,
1633
+ maximum=0.5,
1634
+ label=i18n("保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果"),
1635
+ value=0.33,
1636
+ step=0.01,
1637
+ interactive=True,
1638
+ )
1639
+ formanting = gr.Checkbox(
1640
+ value=bool(DoFormant),
1641
+ label="[EXPERIMENTAL] Formant shift inference audio",
1642
+ info="Used for male to female and vice-versa conversions",
1643
+ interactive=True,
1644
+ visible=True,
1645
+ )
1646
+
1647
+ formant_preset = gr.Dropdown(
1648
+ value='',
1649
+ choices=get_fshift_presets(),
1650
+ label="browse presets for formanting",
1651
+ visible=bool(DoFormant),
1652
+ )
1653
+ formant_refresh_button = gr.Button(
1654
+ value='\U0001f504',
1655
+ visible=bool(DoFormant),
1656
+ variant='primary',
1657
+ )
1658
+ #formant_refresh_button = ToolButton( elem_id='1')
1659
+ #create_refresh_button(formant_preset, lambda: {"choices": formant_preset}, "refresh_list_shiftpresets")
1660
+
1661
+ qfrency = gr.Slider(
1662
+ value=Quefrency,
1663
+ info="Default value is 1.0",
1664
+ label="Quefrency for formant shifting",
1665
+ minimum=0.0,
1666
+ maximum=16.0,
1667
+ step=0.1,
1668
+ visible=bool(DoFormant),
1669
+ interactive=True,
1670
+ )
1671
+ tmbre = gr.Slider(
1672
+ value=Timbre,
1673
+ info="Default value is 1.0",
1674
+ label="Timbre for formant shifting",
1675
+ minimum=0.0,
1676
+ maximum=16.0,
1677
+ step=0.1,
1678
+ visible=bool(DoFormant),
1679
+ interactive=True,
1680
+ )
1681
+
1682
+ formant_preset.change(fn=preset_apply, inputs=[formant_preset, qfrency, tmbre], outputs=[qfrency, tmbre])
1683
+ frmntbut = gr.Button("Apply", variant="primary", visible=bool(DoFormant))
1684
+ formanting.change(fn=formant_enabled,inputs=[formanting,qfrency,tmbre,frmntbut,formant_preset,formant_refresh_button],outputs=[formanting,qfrency,tmbre,frmntbut,formant_preset,formant_refresh_button])
1685
+ frmntbut.click(fn=formant_apply,inputs=[qfrency, tmbre], outputs=[qfrency, tmbre])
1686
+ formant_refresh_button.click(fn=update_fshift_presets,inputs=[formant_preset, qfrency, tmbre],outputs=[formant_preset, qfrency, tmbre])
1687
+ with gr.Row():
1688
+ vc_output1 = gr.Textbox("")
1689
+ f0_file = gr.File(label=i18n("F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调"), visible=False)
1690
+
1691
+ but0.click(
1692
+ vc_single,
1693
+ [
1694
+ spk_item,
1695
+ input_audio0,
1696
+ vc_transform0,
1697
+ f0_file,
1698
+ f0method0,
1699
+ file_index1,
1700
+ # file_index2,
1701
+ # file_big_npy1,
1702
+ index_rate1,
1703
+ filter_radius0,
1704
+ resample_sr0,
1705
+ rms_mix_rate0,
1706
+ protect0,
1707
+ crepe_hop_length
1708
+ ],
1709
+ [vc_output1, vc_output2],
1710
+ )
1711
+
1712
+ with gr.Accordion("Batch Conversion",open=False):
1713
+ with gr.Row():
1714
+ with gr.Column():
1715
+ vc_transform1 = gr.Number(
1716
+ label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0
1717
+ )
1718
+ opt_input = gr.Textbox(label=i18n("指定输出文件夹"), value="opt")
1719
+ f0method1 = gr.Radio(
1720
+ label=i18n(
1721
+ "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU"
1722
+ ),
1723
+ choices=["pm", "harvest", "crepe", "rmvpe"],
1724
+ value="rmvpe",
1725
+ interactive=True,
1726
+ )
1727
+ filter_radius1 = gr.Slider(
1728
+ minimum=0,
1729
+ maximum=7,
1730
+ label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"),
1731
+ value=3,
1732
+ step=1,
1733
+ interactive=True,
1734
+ )
1735
+ with gr.Column():
1736
+ file_index3 = gr.Textbox(
1737
+ label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"),
1738
+ value="",
1739
+ interactive=True,
1740
+ )
1741
+ file_index4 = gr.Dropdown(
1742
+ label=i18n("自动检测index路径,下拉式选择(dropdown)"),
1743
+ choices=sorted(index_paths),
1744
+ interactive=True,
1745
+ )
1746
+ refresh_button.click(
1747
+ fn=lambda: change_choices()[1],
1748
+ inputs=[],
1749
+ outputs=file_index4,
1750
+ )
1751
+ # file_big_npy2 = gr.Textbox(
1752
+ # label=i18n("特征文件路径"),
1753
+ # value="E:\\codes\\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy",
1754
+ # interactive=True,
1755
+ # )
1756
+ index_rate2 = gr.Slider(
1757
+ minimum=0,
1758
+ maximum=1,
1759
+ label=i18n("检索特征占比"),
1760
+ value=1,
1761
+ interactive=True,
1762
+ )
1763
+ with gr.Column():
1764
+ resample_sr1 = gr.Slider(
1765
+ minimum=0,
1766
+ maximum=48000,
1767
+ label=i18n("后处理重采样至最终采样率,0为不进行重采样"),
1768
+ value=0,
1769
+ step=1,
1770
+ interactive=True,
1771
+ )
1772
+ rms_mix_rate1 = gr.Slider(
1773
+ minimum=0,
1774
+ maximum=1,
1775
+ label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"),
1776
+ value=1,
1777
+ interactive=True,
1778
+ )
1779
+ protect1 = gr.Slider(
1780
+ minimum=0,
1781
+ maximum=0.5,
1782
+ label=i18n(
1783
+ "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果"
1784
+ ),
1785
+ value=0.33,
1786
+ step=0.01,
1787
+ interactive=True,
1788
+ )
1789
+ with gr.Column():
1790
+ dir_input = gr.Textbox(
1791
+ label=i18n("输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)"),
1792
+ value="E:\codes\py39\\test-20230416b\\todo-songs",
1793
+ )
1794
+ inputs = gr.File(
1795
+ file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹")
1796
+ )
1797
+ with gr.Row():
1798
+ format1 = gr.Radio(
1799
+ label=i18n("导出文件格式"),
1800
+ choices=["wav", "flac", "mp3", "m4a"],
1801
+ value="flac",
1802
+ interactive=True,
1803
+ )
1804
+ but1 = gr.Button(i18n("转换"), variant="primary")
1805
+ vc_output3 = gr.Textbox(label=i18n("输出信息"))
1806
+ but1.click(
1807
+ vc_multi,
1808
+ [
1809
+ spk_item,
1810
+ dir_input,
1811
+ opt_input,
1812
+ inputs,
1813
+ vc_transform1,
1814
+ f0method1,
1815
+ file_index3,
1816
+ file_index4,
1817
+ # file_big_npy2,
1818
+ index_rate2,
1819
+ filter_radius1,
1820
+ resample_sr1,
1821
+ rms_mix_rate1,
1822
+ protect1,
1823
+ format1,
1824
+ crepe_hop_length,
1825
+ ],
1826
+ [vc_output3],
1827
+ )
1828
+ but1.click(fn=lambda: easy_uploader.clear())
1829
+ with gr.TabItem("Download Model"):
1830
+ with gr.Row():
1831
+ url=gr.Textbox(label="Enter the URL to the Model:")
1832
+ with gr.Row():
1833
+ model = gr.Textbox(label="Name your model:")
1834
+ download_button=gr.Button("Download ↩️")
1835
+ with gr.Row():
1836
+ status_bar=gr.Textbox(label="")
1837
+ download_button.click(fn=download_from_url, inputs=[url, model], outputs=[status_bar])
1838
+ with gr.Row():
1839
+ gr.Markdown(
1840
+ """
1841
+ laynz28's huggingface:https://huggingface.co/LaynzID12
1842
+ Original RVC:https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI Mangio’s RVC Fork:https://github.com/Mangio621/Mangio-RVC-Fork ❤️ RVC GUI:https://github.com/Tiger14n/RVC-GUI ❤️ If you like the EasyGUI, help me keep it.❤️ https://paypal.me/lesantillan
1843
+ """
1844
+ )
1845
+
1846
def has_two_files_in_pretrained_folder():
    """Report whether the pretrained-weights folder looks populated.

    Returns:
        bool: True when the relative directory ``./pretrained/`` exists and
        contains at least two entries (the G and D base models), False
        otherwise.  Resolved against the current working directory.
    """
    weights_dir = "./pretrained/"
    # Missing folder means the weights were never downloaded.
    if not os.path.exists(weights_dir):
        return False

    # Two or more entries is taken as "both base models are present".
    return len(os.listdir(weights_dir)) >= 2
1854
+
1855
+ if has_two_files_in_pretrained_folder():
1856
+ print("Pretrained weights are downloaded. Training tab enabled!\n-------------------------------")
1857
+ with gr.TabItem("Train", visible=False):
1858
+ with gr.Row():
1859
+ with gr.Column():
1860
+ exp_dir1 = gr.Textbox(label="Voice Name:", value="My-Voice")
1861
+ sr2 = gr.Radio(
1862
+ label=i18n("目标采样率"),
1863
+ choices=["40k", "48k"],
1864
+ value="40k",
1865
+ interactive=True,
1866
+ visible=False
1867
+ )
1868
+ if_f0_3 = gr.Radio(
1869
+ label=i18n("模型是否带音高指导(唱歌一定要, 语音可以不要)"),
1870
+ choices=[True, False],
1871
+ value=True,
1872
+ interactive=True,
1873
+ visible=False
1874
+ )
1875
+ version19 = gr.Radio(
1876
+ label="RVC version",
1877
+ choices=["v1", "v2"],
1878
+ value="v2",
1879
+ interactive=True,
1880
+ visible=False,
1881
+ )
1882
+ np7 = gr.Slider(
1883
+ minimum=0,
1884
+ maximum=config.n_cpu,
1885
+ step=1,
1886
+ label="# of CPUs for data processing (Leave as it is)",
1887
+ value=config.n_cpu,
1888
+ interactive=True,
1889
+ visible=True
1890
+ )
1891
+ trainset_dir4 = gr.Textbox(label="Path to your dataset (audios, not zip):", value="./dataset")
1892
+ easy_uploader = gr.Files(label='OR Drop your audios here. They will be uploaded in your dataset path above.',file_types=['audio'])
1893
+ but1 = gr.Button("1. Process The Dataset", variant="primary")
1894
+ info1 = gr.Textbox(label="Status (wait until it says 'end preprocess'):", value="")
1895
+ easy_uploader.upload(fn=upload_to_dataset, inputs=[easy_uploader, trainset_dir4], outputs=[info1])
1896
+ but1.click(
1897
+ preprocess_dataset, [trainset_dir4, exp_dir1, sr2, np7], [info1]
1898
+ )
1899
+ with gr.Column():
1900
+ spk_id5 = gr.Slider(
1901
+ minimum=0,
1902
+ maximum=4,
1903
+ step=1,
1904
+ label=i18n("请指定说话人id"),
1905
+ value=0,
1906
+ interactive=True,
1907
+ visible=False
1908
+ )
1909
+ with gr.Accordion('GPU Settings', open=False, visible=False):
1910
+ gpus6 = gr.Textbox(
1911
+ label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"),
1912
+ value=gpus,
1913
+ interactive=True,
1914
+ visible=False
1915
+ )
1916
+ gpu_info9 = gr.Textbox(label=i18n("显卡信息"), value=gpu_info)
1917
+ f0method8 = gr.Radio(
1918
+ label=i18n(
1919
+ "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢"
1920
+ ),
1921
+ choices=["harvest","crepe", "mangio-crepe", "rmvpe"], # Fork feature: Crepe on f0 extraction for training.
1922
+ value="rmvpe",
1923
+ interactive=True,
1924
+ )
1925
+
1926
+ extraction_crepe_hop_length = gr.Slider(
1927
+ minimum=1,
1928
+ maximum=512,
1929
+ step=1,
1930
+ label=i18n("crepe_hop_length"),
1931
+ value=128,
1932
+ interactive=True,
1933
+ visible=False,
1934
+ )
1935
+ f0method8.change(fn=whethercrepeornah, inputs=[f0method8], outputs=[extraction_crepe_hop_length])
1936
+ but2 = gr.Button("2. Pitch Extraction", variant="primary")
1937
+ info2 = gr.Textbox(label="Status(Check the Colab Notebook's cell output):", value="", max_lines=8)
1938
+ but2.click(
1939
+ extract_f0_feature,
1940
+ [gpus6, np7, f0method8, if_f0_3, exp_dir1, version19, extraction_crepe_hop_length],
1941
+ [info2],
1942
+ )
1943
+ with gr.Row():
1944
+ with gr.Column():
1945
+ total_epoch11 = gr.Slider(
1946
+ minimum=1,
1947
+ maximum=5000,
1948
+ step=10,
1949
+ label="Total # of training epochs (IF you choose a value too high, your model will sound horribly overtrained.):",
1950
+ value=250,
1951
+ interactive=True,
1952
+ )
1953
+ butstop = gr.Button(
1954
+ "Stop Training",
1955
+ variant='primary',
1956
+ visible=False,
1957
+ )
1958
+ but3 = gr.Button("3. Train Model", variant="primary", visible=True)
1959
+
1960
+ but3.click(fn=stoptraining, inputs=[gr.Number(value=0, visible=False)], outputs=[but3, butstop])
1961
+ butstop.click(fn=stoptraining, inputs=[gr.Number(value=1, visible=False)], outputs=[butstop, but3])
1962
+
1963
+
1964
+ but4 = gr.Button("4.Train Index", variant="primary")
1965
+ info3 = gr.Textbox(label="Status(Check the Colab Notebook's cell output):", value="", max_lines=10)
1966
+ with gr.Accordion("Training Preferences (You can leave these as they are)", open=False):
1967
+ #gr.Markdown(value=i18n("step3: 填写训练设置, 开始训练模型和索引"))
1968
+ with gr.Column():
1969
+ save_epoch10 = gr.Slider(
1970
+ minimum=1,
1971
+ maximum=200,
1972
+ step=1,
1973
+ label="Backup every X amount of epochs:",
1974
+ value=10,
1975
+ interactive=True,
1976
+ )
1977
+ batch_size12 = gr.Slider(
1978
+ minimum=1,
1979
+ maximum=40,
1980
+ step=1,
1981
+ label="Batch Size (LEAVE IT unless you know what you're doing!):",
1982
+ value=default_batch_size,
1983
+ interactive=True,
1984
+ )
1985
+ if_save_latest13 = gr.Checkbox(
1986
+ label="Save only the latest '.ckpt' file to save disk space.",
1987
+ value=True,
1988
+ interactive=True,
1989
+ )
1990
+ if_cache_gpu17 = gr.Checkbox(
1991
+ label="Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training, but caching large datasets will consume a lot of GPU memory and may not provide much speed improvement.",
1992
+ value=False,
1993
+ interactive=True,
1994
+ )
1995
+ if_save_every_weights18 = gr.Checkbox(
1996
+ label="Save a small final model to the 'weights' folder at each save point.",
1997
+ value=True,
1998
+ interactive=True,
1999
+ )
2000
+ zip_model = gr.Button('5. Download Model')
2001
+ zipped_model = gr.Files(label='Your Model and Index file can be downloaded here:')
2002
+ zip_model.click(fn=zip_downloader, inputs=[exp_dir1], outputs=[zipped_model, info3])
2003
+ with gr.Group():
2004
+ with gr.Accordion("Base Model Locations:", open=False, visible=False):
2005
+ pretrained_G14 = gr.Textbox(
2006
+ label=i18n("加载预训练底模G路径"),
2007
+ value="pretrained_v2/f0G40k.pth",
2008
+ interactive=True,
2009
+ )
2010
+ pretrained_D15 = gr.Textbox(
2011
+ label=i18n("加载预训练底模D路径"),
2012
+ value="pretrained_v2/f0D40k.pth",
2013
+ interactive=True,
2014
+ )
2015
+ gpus16 = gr.Textbox(
2016
+ label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"),
2017
+ value=gpus,
2018
+ interactive=True,
2019
+ )
2020
+ sr2.change(
2021
+ change_sr2,
2022
+ [sr2, if_f0_3, version19],
2023
+ [pretrained_G14, pretrained_D15, version19],
2024
+ )
2025
+ version19.change(
2026
+ change_version19,
2027
+ [sr2, if_f0_3, version19],
2028
+ [pretrained_G14, pretrained_D15],
2029
+ )
2030
+ if_f0_3.change(
2031
+ change_f0,
2032
+ [if_f0_3, sr2, version19],
2033
+ [f0method8, pretrained_G14, pretrained_D15],
2034
+ )
2035
+ but5 = gr.Button(i18n("一键训练"), variant="primary", visible=False)
2036
+ but3.click(
2037
+ click_train,
2038
+ [
2039
+ exp_dir1,
2040
+ sr2,
2041
+ if_f0_3,
2042
+ spk_id5,
2043
+ save_epoch10,
2044
+ total_epoch11,
2045
+ batch_size12,
2046
+ if_save_latest13,
2047
+ pretrained_G14,
2048
+ pretrained_D15,
2049
+ gpus16,
2050
+ if_cache_gpu17,
2051
+ if_save_every_weights18,
2052
+ version19,
2053
+ ],
2054
+ [
2055
+ info3,
2056
+ butstop,
2057
+ but3,
2058
+ ],
2059
+ )
2060
+ but4.click(train_index, [exp_dir1, version19], info3)
2061
+ but5.click(
2062
+ train1key,
2063
+ [
2064
+ exp_dir1,
2065
+ sr2,
2066
+ if_f0_3,
2067
+ trainset_dir4,
2068
+ spk_id5,
2069
+ np7,
2070
+ f0method8,
2071
+ save_epoch10,
2072
+ total_epoch11,
2073
+ batch_size12,
2074
+ if_save_latest13,
2075
+ pretrained_G14,
2076
+ pretrained_D15,
2077
+ gpus16,
2078
+ if_cache_gpu17,
2079
+ if_save_every_weights18,
2080
+ version19,
2081
+ extraction_crepe_hop_length
2082
+ ],
2083
+ info3,
2084
+ )
2085
+
2086
+ else:
2087
+ print(
2088
+ "Pretrained weights not downloaded. Disabling training tab.\n"
2089
+ "Wondering how to train a voice? Visit here for the RVC model training guide: https://t.ly/RVC_Training_Guide\n"
2090
+ "-------------------------------\n"
2091
+ )
2092
+
2093
+ app.queue(concurrency_count=511, max_size=1022).launch(share=True)
2094
+ #endregion