freyza committed on
Commit
f30ef49
1 Parent(s): 71ebbf9

Upload 2 files

Browse files
Files changed (2) hide show
  1. src/rmvpe.py +409 -0
  2. src/rvc.py +151 -0
src/rmvpe.py ADDED
@@ -0,0 +1,409 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from librosa.filters import mel
6
+
7
+
8
class BiGRU(nn.Module):
    """Bidirectional GRU that returns only the output sequence.

    The hidden-state half of ``nn.GRU``'s return value is discarded, so the
    module maps (batch, time, input_features) to
    (batch, time, 2 * hidden_features).
    """

    def __init__(self, input_features, hidden_features, num_layers):
        super().__init__()
        # batch_first so inputs are (batch, time, features); bidirectional
        # doubles the output feature dimension.
        self.gru = nn.GRU(
            input_features,
            hidden_features,
            num_layers=num_layers,
            batch_first=True,
            bidirectional=True,
        )

    def forward(self, x):
        output, _hidden = self.gru(x)
        return output
21
+
22
+
23
class ConvBlockRes(nn.Module):
    """Two 3x3 Conv+BN+ReLU layers with a residual connection.

    When the channel count changes, a 1x1 convolution projects the input so
    it can be added to the block's output; otherwise the input is added
    directly.
    """

    def __init__(self, in_channels, out_channels, momentum=0.01):
        super().__init__()

        def conv_bn_relu(c_in, c_out):
            # 3x3 conv keeps the spatial size (stride 1, padding 1); the BN
            # that follows makes a conv bias redundant, hence bias=False.
            return [
                nn.Conv2d(
                    in_channels=c_in,
                    out_channels=c_out,
                    kernel_size=(3, 3),
                    stride=(1, 1),
                    padding=(1, 1),
                    bias=False,
                ),
                nn.BatchNorm2d(c_out, momentum=momentum),
                nn.ReLU(),
            ]

        self.conv = nn.Sequential(
            *conv_bn_relu(in_channels, out_channels),
            *conv_bn_relu(out_channels, out_channels),
        )
        self.is_shortcut = in_channels != out_channels
        if self.is_shortcut:
            self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1))

    def forward(self, x):
        residual = self.shortcut(x) if self.is_shortcut else x
        return self.conv(x) + residual
59
+
60
+
61
class Encoder(nn.Module):
    """Stack of downsampling ResEncoderBlocks with an input BatchNorm.

    Each stage doubles the channel count and (via the block's pooling)
    halves the spatial size. ``forward`` returns the bottom feature map plus
    the pre-pool tensor of every stage for the decoder's skip connections.
    """

    def __init__(
        self,
        in_channels,
        in_size,
        n_encoders,
        kernel_size,
        n_blocks,
        out_channels=16,
        momentum=0.01,
    ):
        super().__init__()
        self.n_encoders = n_encoders
        self.bn = nn.BatchNorm2d(in_channels, momentum=momentum)
        self.layers = nn.ModuleList()
        self.latent_channels = []
        channels_in, channels_out, size = in_channels, out_channels, in_size
        for _ in range(n_encoders):
            self.layers.append(
                ResEncoderBlock(
                    channels_in, channels_out, kernel_size, n_blocks, momentum=momentum
                )
            )
            self.latent_channels.append([channels_out, size])
            channels_in = channels_out
            channels_out *= 2
            size //= 2
        # Dimensions at the bottom of the U; read by DeepUnet.
        self.out_size = size
        self.out_channel = channels_out

    def forward(self, x):
        skip_connections = []
        x = self.bn(x)
        for layer in self.layers:
            pre_pool, x = layer(x)
            skip_connections.append(pre_pool)
        return x, skip_connections
97
+
98
+
99
class ResEncoderBlock(nn.Module):
    """A chain of ConvBlockRes units, optionally followed by average pooling.

    With a pooling kernel, ``forward`` returns ``(features, pooled)`` so the
    un-pooled features can feed a skip connection; with ``kernel_size=None``
    only the features are returned.
    """

    def __init__(
        self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01
    ):
        super().__init__()
        self.n_blocks = n_blocks
        blocks = [ConvBlockRes(in_channels, out_channels, momentum)]
        blocks += [
            ConvBlockRes(out_channels, out_channels, momentum)
            for _ in range(n_blocks - 1)
        ]
        self.conv = nn.ModuleList(blocks)
        self.kernel_size = kernel_size
        if kernel_size is not None:
            self.pool = nn.AvgPool2d(kernel_size=kernel_size)

    def forward(self, x):
        for block in self.conv:
            x = block(x)
        if self.kernel_size is None:
            return x
        return x, self.pool(x)
120
+
121
+
122
class Intermediate(nn.Module):
    """U-Net bottleneck: ``n_inters`` ResEncoderBlocks without pooling.

    ``kernel_size=None`` disables pooling inside each block, so spatial size
    is preserved through the bottleneck.
    """

    def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01):
        super().__init__()
        self.n_inters = n_inters
        first = ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum)
        rest = [
            ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum)
            for _ in range(n_inters - 1)
        ]
        self.layers = nn.ModuleList([first] + rest)

    def forward(self, x):
        for layer in self.layers:
            x = layer(x)
        return x
139
+
140
+
141
class ResDecoderBlock(nn.Module):
    """Upsampling block: transposed conv, skip concatenation, ConvBlockRes chain."""

    def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01):
        super().__init__()
        # Output padding chosen so the transposed conv exactly doubles the
        # dimension(s) selected by the stride.
        out_padding = (0, 1) if stride == (1, 2) else (1, 1)
        self.n_blocks = n_blocks
        self.conv1 = nn.Sequential(
            nn.ConvTranspose2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=(3, 3),
                stride=stride,
                padding=(1, 1),
                output_padding=out_padding,
                bias=False,
            ),
            nn.BatchNorm2d(out_channels, momentum=momentum),
            nn.ReLU(),
        )
        # The first unit consumes the concatenated skip tensor (2x channels).
        post = [ConvBlockRes(out_channels * 2, out_channels, momentum)]
        post += [
            ConvBlockRes(out_channels, out_channels, momentum)
            for _ in range(n_blocks - 1)
        ]
        self.conv2 = nn.ModuleList(post)

    def forward(self, x, concat_tensor):
        upsampled = self.conv1(x)
        x = torch.cat((upsampled, concat_tensor), dim=1)
        for block in self.conv2:
            x = block(x)
        return x
170
+
171
+
172
class Decoder(nn.Module):
    """Stack of ResDecoderBlocks, halving the channel count at each stage."""

    def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01):
        super().__init__()
        self.layers = nn.ModuleList()
        self.n_decoders = n_decoders
        channels = in_channels
        for _ in range(n_decoders):
            self.layers.append(
                ResDecoderBlock(channels, channels // 2, stride, n_blocks, momentum)
            )
            channels //= 2

    def forward(self, x, concat_tensors):
        # Skip tensors are consumed deepest-first (reverse encoder order).
        for i, layer in enumerate(self.layers):
            x = layer(x, concat_tensors[-1 - i])
        return x
188
+
189
+
190
class DeepUnet(nn.Module):
    """U-Net backbone: Encoder -> Intermediate bottleneck -> Decoder with skips.

    The encoder is built for a 128-bin input axis; the same ``kernel_size``
    drives both the encoder's pooling and the decoder's upsampling stride,
    keeping the two paths symmetric.
    """

    def __init__(
        self,
        kernel_size,
        n_blocks,
        en_de_layers=5,
        inter_layers=4,
        in_channels=1,
        en_out_channels=16,
    ):
        super().__init__()
        self.encoder = Encoder(
            in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels
        )
        bottom_channels = self.encoder.out_channel
        self.intermediate = Intermediate(
            bottom_channels // 2,
            bottom_channels,
            inter_layers,
            n_blocks,
        )
        self.decoder = Decoder(bottom_channels, en_de_layers, kernel_size, n_blocks)

    def forward(self, x):
        bottom, skips = self.encoder(x)
        return self.decoder(self.intermediate(bottom), skips)
219
+
220
+
221
class E2E(nn.Module):
    """RMVPE end-to-end model: DeepUnet features -> 3-channel conv -> pitch head.

    The head emits a 360-way sigmoid salience per frame (20-cent pitch bins),
    either through a BiGRU + linear layer (``n_gru`` > 0) or a plain linear
    layer.
    """

    def __init__(
        self,
        n_blocks,
        n_gru,
        kernel_size,
        en_de_layers=5,
        inter_layers=4,
        in_channels=1,
        en_out_channels=16,
    ):
        super(E2E, self).__init__()
        self.unet = DeepUnet(
            kernel_size,
            n_blocks,
            en_de_layers,
            inter_layers,
            in_channels,
            en_out_channels,
        )
        self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1))
        if n_gru:
            self.fc = nn.Sequential(
                BiGRU(3 * 128, 256, n_gru),
                nn.Linear(512, 360),
                nn.Dropout(0.25),
                nn.Sigmoid(),
            )
        else:
            # BUG FIX: this branch referenced undefined globals N_MELS/N_CLASS
            # and raised NameError whenever n_gru == 0. Use the same
            # dimensions as the GRU branch: 3 conv channels x 128 mel bins in,
            # 360 pitch bins out.
            self.fc = nn.Sequential(
                nn.Linear(3 * 128, 360), nn.Dropout(0.25), nn.Sigmoid()
            )

    def forward(self, mel):
        # (B, T, n_mels) -> (B, 1, n_mels, T): treat the mel as a 1-channel image.
        mel = mel.transpose(-1, -2).unsqueeze(1)
        # Flatten (channels, mel-bins) into a per-frame feature vector.
        x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2)
        x = self.fc(x)
        return x
259
+
260
+
261
class MelSpectrogram(torch.nn.Module):
    """Log-mel spectrogram extractor with optional key-shift and speed change.

    The mel filterbank is precomputed with librosa (HTK scale) and registered
    as a buffer. Hann windows are cached per (keyshift, device) pair because
    a key shift rescales the FFT and window sizes.
    """

    def __init__(
        self,
        is_half,
        n_mel_channels,
        sampling_rate,
        win_length,
        hop_length,
        n_fft=None,
        mel_fmin=0,
        mel_fmax=None,
        clamp=1e-5,
    ):
        super().__init__()
        n_fft = win_length if n_fft is None else n_fft
        self.hann_window = {}
        mel_basis = mel(
            sr=sampling_rate,
            n_fft=n_fft,
            n_mels=n_mel_channels,
            fmin=mel_fmin,
            fmax=mel_fmax,
            htk=True,
        )
        self.register_buffer("mel_basis", torch.from_numpy(mel_basis).float())
        # n_fft was already defaulted to win_length above; the original
        # re-evaluated the (now always-false) None check here.
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length
        self.sampling_rate = sampling_rate
        self.n_mel_channels = n_mel_channels
        self.clamp = clamp  # floor applied before log to avoid -inf
        self.is_half = is_half

    def forward(self, audio, keyshift=0, speed=1, center=True):
        """Return the log-mel of ``audio`` (batch of 1-D signals).

        A key shift of ``keyshift`` semitones scales the analysis sizes by
        2**(keyshift/12); ``speed`` scales the hop length.
        """
        factor = 2 ** (keyshift / 12)
        n_fft_new = int(np.round(self.n_fft * factor))
        win_length_new = int(np.round(self.win_length * factor))
        hop_length_new = int(np.round(self.hop_length * speed))
        keyshift_key = str(keyshift) + "_" + str(audio.device)
        if keyshift_key not in self.hann_window:
            self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to(
                audio.device
            )
        fft = torch.stft(
            audio,
            n_fft=n_fft_new,
            hop_length=hop_length_new,
            win_length=win_length_new,
            window=self.hann_window[keyshift_key],
            center=center,
            return_complex=True,
        )
        magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2))
        if keyshift != 0:
            # Crop/pad the rescaled spectrum back to the nominal bin count and
            # compensate for the changed window energy.
            size = self.n_fft // 2 + 1
            resize = magnitude.size(1)
            if resize < size:
                magnitude = F.pad(magnitude, (0, 0, 0, size - resize))
            magnitude = magnitude[:, :size, :] * self.win_length / win_length_new
        mel_output = torch.matmul(self.mel_basis, magnitude)
        if self.is_half:
            mel_output = mel_output.half()
        log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp))
        return log_mel_spec
326
+
327
+
328
class RMVPE:
    """Pretrained E2E model wrapper for F0 (pitch) estimation.

    Loads the checkpoint, extracts 128-bin log-mel features at 16 kHz, runs
    the network, and decodes the 360-bin salience into an F0 curve in Hz
    (one value per 10 ms frame; 0 marks unvoiced frames).
    """

    def __init__(self, model_path, is_half, device=None):
        model = E2E(4, 1, (2, 2))
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # trusted checkpoint files.
        ckpt = torch.load(model_path, map_location="cpu")
        model.load_state_dict(ckpt)
        model.eval()
        if is_half:
            model = model.half()
        # The original assigned this dict twice; once is enough.
        self.resample_kernel = {}
        self.is_half = is_half
        if device is None:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        self.device = device
        # 128 mel bins, 16 kHz, 1024-sample window, 160-sample hop, 30-8000 Hz.
        self.mel_extractor = MelSpectrogram(
            is_half, 128, 16000, 1024, 160, None, 30, 8000
        ).to(device)
        self.model = model.to(device)
        # Bin i corresponds to 1997.379... + 20*i cents (20-cent resolution);
        # padded by 4 on each side for the 9-bin local average below -> 368.
        cents_mapping = 20 * np.arange(360) + 1997.3794084376191
        self.cents_mapping = np.pad(cents_mapping, (4, 4))  # 368

    def mel2hidden(self, mel):
        """Run the network on a mel spectrogram, reflect-padding the frame
        axis to a multiple of 32 (the U-Net's downsampling factor)."""
        with torch.no_grad():
            n_frames = mel.shape[-1]
            pad = 32 * ((n_frames - 1) // 32 + 1) - n_frames
            mel = F.pad(mel, (0, pad), mode="reflect")
            hidden = self.model(mel)
            return hidden[:, :n_frames]

    def decode(self, hidden, thred=0.03):
        """Convert salience to F0 in Hz; frames decoded to 0 cents (i.e. 10 Hz)
        are treated as unvoiced and set to 0."""
        cents_pred = self.to_local_average_cents(hidden, thred=thred)
        f0 = 10 * (2 ** (cents_pred / 1200))
        f0[f0 == 10] = 0
        return f0

    def infer_from_audio(self, audio, thred=0.03):
        """Estimate F0 from a mono 16 kHz numpy float array.

        Returns a numpy array with one Hz value per 10 ms frame.
        """
        audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0)
        mel = self.mel_extractor(audio, center=True)
        hidden = self.mel2hidden(mel)
        hidden = hidden.squeeze(0).cpu().numpy()
        if self.is_half:
            hidden = hidden.astype("float32")
        f0 = self.decode(hidden, thred=thred)
        return f0

    def to_local_average_cents(self, salience, thred=0.05):
        """Weighted average of cents in a 9-bin window around each frame's
        argmax salience bin; frames whose peak is <= ``thred`` return 0."""
        center = np.argmax(salience, axis=1)  # per-frame peak index
        salience = np.pad(salience, ((0, 0), (4, 4)))  # (frames, 368)
        center += 4  # account for the padding
        todo_salience = []
        todo_cents_mapping = []
        starts = center - 4
        ends = center + 5
        for idx in range(salience.shape[0]):
            todo_salience.append(salience[:, starts[idx] : ends[idx]][idx])
            todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]])
        todo_salience = np.array(todo_salience)  # (frames, 9)
        todo_cents_mapping = np.array(todo_cents_mapping)  # (frames, 9)
        product_sum = np.sum(todo_salience * todo_cents_mapping, 1)
        weight_sum = np.sum(todo_salience, 1)  # per-frame normalizer
        devided = product_sum / weight_sum
        # Zero out unvoiced frames (peak salience below threshold).
        maxx = np.max(salience, axis=1)
        devided[maxx <= thred] = 0
        return devided
src/rvc.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from multiprocessing import cpu_count
2
+ from pathlib import Path
3
+
4
+ import torch
5
+ from fairseq import checkpoint_utils
6
+ from scipy.io import wavfile
7
+
8
+ from infer_pack.models import (
9
+ SynthesizerTrnMs256NSFsid,
10
+ SynthesizerTrnMs256NSFsid_nono,
11
+ SynthesizerTrnMs768NSFsid,
12
+ SynthesizerTrnMs768NSFsid_nono,
13
+ )
14
+ from my_utils import load_audio
15
+ from vc_infer_pipeline import VC
16
+
17
+ BASE_DIR = Path(__file__).resolve().parent.parent
18
+
19
+
20
class Config:
    """Runtime configuration for RVC inference.

    Probes the available accelerator (CUDA / MPS / CPU), forces single
    precision on GPUs with poor fp16 support, and derives the pipeline's
    padding/window parameters from available GPU memory.
    """

    def __init__(self, device, is_half):
        # device: torch device string, e.g. "cuda:0"; is_half: request fp16.
        self.device = device
        self.is_half = is_half
        self.n_cpu = 0
        self.gpu_name = None
        self.gpu_mem = None  # total GPU memory in GiB (rounded), or None
        self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()

    def device_config(self) -> tuple:
        """Return (x_pad, x_query, x_center, x_max) tuned to the device.

        Side effects: may rewrite the config JSONs and
        trainset_preprocess_pipeline_print.py on disk to force fp32 /
        lower-memory settings, and may change self.device / self.is_half.
        """
        if torch.cuda.is_available():
            i_device = int(self.device.split(":")[-1])
            self.gpu_name = torch.cuda.get_device_name(i_device)
            # 16-series / 10-series / P40 cards have weak or broken fp16
            # support (V100 contains "100" but is explicitly excluded).
            if (
                ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
                or "P40" in self.gpu_name.upper()
                or "1060" in self.gpu_name
                or "1070" in self.gpu_name
                or "1080" in self.gpu_name
            ):
                print("16 series/10 series P40 forced single precision")
                self.is_half = False
                # Rewrite the saved model configs so fp16 ("true") becomes fp32.
                for config_file in ["32k.json", "40k.json", "48k.json"]:
                    with open(BASE_DIR / "src" / "configs" / config_file, "r") as f:
                        strr = f.read().replace("true", "false")
                    with open(BASE_DIR / "src" / "configs" / config_file, "w") as f:
                        f.write(strr)
                with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", "r") as f:
                    strr = f.read().replace("3.7", "3.0")
                with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", "w") as f:
                    f.write(strr)
            else:
                self.gpu_name = None
            self.gpu_mem = int(
                torch.cuda.get_device_properties(i_device).total_memory
                / 1024
                / 1024
                / 1024
                + 0.4
            )
            if self.gpu_mem <= 4:
                # Low-memory GPUs also get the reduced preprocessing setting.
                with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", "r") as f:
                    strr = f.read().replace("3.7", "3.0")
                with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", "w") as f:
                    f.write(strr)
        elif torch.backends.mps.is_available():
            print("No supported N-card found, use MPS for inference")
            self.device = "mps"
        else:
            print("No supported N-card found, use CPU for inference")
            self.device = "cpu"
            # NOTE(review): enabling fp16 on CPU looks suspicious — confirm
            # the pipeline really runs half precision on CPU, or whether this
            # should be False.
            self.is_half = True

        if self.n_cpu == 0:
            self.n_cpu = cpu_count()

        # Pipeline chunking parameters (seconds of padding / query / window).
        if self.is_half:
            # 6G memory config
            x_pad = 3
            x_query = 10
            x_center = 60
            x_max = 65
        else:
            # 5G memory config
            x_pad = 1
            x_query = 6
            x_center = 38
            x_max = 41

        if self.gpu_mem != None and self.gpu_mem <= 4:
            # Tighter windows for GPUs with <= 4 GiB of memory.
            x_pad = 1
            x_query = 5
            x_center = 30
            x_max = 32

        return x_pad, x_query, x_center, x_max
96
+
97
+
98
def load_hubert(device, is_half, model_path):
    """Load the HuBERT content encoder from ``model_path`` onto ``device``.

    The model is cast to fp16 when ``is_half`` is true (fp32 otherwise) and
    switched to eval mode.
    """
    models, _saved_cfg, _task = checkpoint_utils.load_model_ensemble_and_task(
        [model_path],
        suffix='',
    )
    hubert = models[0].to(device)
    hubert = hubert.half() if is_half else hubert.float()
    hubert.eval()
    return hubert
110
+
111
+
112
def get_vc(device, is_half, config, model_path):
    """Load an RVC voice-model checkpoint and build its synthesizer + pipeline.

    Returns (cpt, version, net_g, tgt_sr, vc).
    Raises ValueError if the checkpoint is not in RVC format or carries an
    unknown version tag.
    """
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # trusted checkpoint files.
    cpt = torch.load(model_path, map_location='cpu')
    if "config" not in cpt or "weight" not in cpt:
        raise ValueError(f'Incorrect format for {model_path}. Use a voice model trained using RVC v2 instead.')

    tgt_sr = cpt["config"][-1]
    # Patch the speaker-embedding count from the actual weights.
    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
    if_f0 = cpt.get("f0", 1)
    version = cpt.get("version", "v1")

    if version == "v1":
        if if_f0 == 1:
            net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
        else:
            net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
    elif version == "v2":
        if if_f0 == 1:
            net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=is_half)
        else:
            net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
    else:
        # BUG FIX: an unknown version previously fell through both branches
        # and crashed with UnboundLocalError on net_g below; fail clearly.
        raise ValueError(f"Unknown model version {version!r} in {model_path}")

    # enc_q is only needed during training; drop it for inference.
    del net_g.enc_q
    print(net_g.load_state_dict(cpt["weight"], strict=False))
    net_g.eval().to(device)

    if is_half:
        net_g = net_g.half()
    else:
        net_g = net_g.float()

    vc = VC(tgt_sr, config)
    return cpt, version, net_g, tgt_sr, vc
144
+
145
+
146
def rvc_infer(index_path, index_rate, input_path, output_path, pitch_change, f0_method, cpt, version, net_g, filter_radius, tgt_sr, rms_mix_rate, protect, crepe_hop_length, vc, hubert_model):
    """Run the full RVC conversion pipeline on one file and write a WAV.

    The input is loaded at 16 kHz (the rate HuBERT expects); the converted
    audio is written at the model's target sample rate.
    """
    audio = load_audio(input_path, 16000)
    times = [0, 0, 0]  # timing accumulators filled in by vc.pipeline
    if_f0 = cpt.get('f0', 1)
    audio_opt = vc.pipeline(
        hubert_model,
        net_g,
        0,
        audio,
        input_path,
        times,
        pitch_change,
        f0_method,
        index_path,
        index_rate,
        if_f0,
        filter_radius,
        tgt_sr,
        0,
        rms_mix_rate,
        version,
        protect,
        crepe_hop_length,
    )
    wavfile.write(output_path, tgt_sr, audio_opt)