DJQmUKV committed
Commit 71ed4b2
Parent(s): 1c262c1

fix: wrong version of vc_infer_pipeline.py

Files changed (1):
  1. vc_infer_pipeline.py +363 -165
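
For callers, the headline change is the `pipeline()` signature: it gains `input_audio_path`, `filter_radius`, `tgt_sr`, `resample_sr`, `rms_mix_rate`, and `version`, and drops `file_big_npy` (the feature matrix is now rebuilt from the faiss index). Below is an illustrative call-site sketch, not part of this commit: `hubert_model`, `net_g`, `config`, and the file paths are assumed to be supplied by the surrounding application (e.g. the RVC web UI).

```python
# Hypothetical wiring for the new signature; only the argument names/order
# come from this diff, everything else is assumed.
from vc_infer_pipeline import VC

def convert(hubert_model, net_g, config, tgt_sr, audio):
    # audio: 16 kHz float waveform; config carries x_pad/x_query/x_center/x_max/is_half/device
    vc = VC(tgt_sr, config)
    times = [0, 0, 0]  # accumulates [hubert, f0, synthesis] seconds
    return vc.pipeline(
        hubert_model,
        net_g,
        sid=0,
        audio=audio,
        input_audio_path="input.wav",  # new: cache key for the harvest f0 (illustrative path)
        times=times,
        f0_up_key=0,
        f0_method="harvest",
        file_index="added.index",      # big_npy is now reconstructed from the index itself
        index_rate=0.75,
        if_f0=1,
        filter_radius=3,               # >2 applies median filtering to the harvest f0
        tgt_sr=tgt_sr,
        resample_sr=0,                 # below 16000 disables resampling
        rms_mix_rate=1.0,              # 1 keeps the synthesized loudness envelope unchanged
        version="v2",                  # v2 uses hubert layer 12 and skips final_proj
        f0_file=None,
    )
```
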
vc_infer_pipeline.py CHANGED
@@ -1,165 +1,363 @@
- import numpy as np, parselmouth, torch, pdb
- from time import time as ttime
- import torch.nn.functional as F
- from config import x_pad, x_query, x_center, x_max
- import scipy.signal as signal
- import pyworld, os, traceback, faiss
-
- class VC(object):
-     def __init__(self, tgt_sr, device, is_half):
-         self.sr = 16000  # hubert input sample rate
-         self.window = 160  # samples per frame
-         self.t_pad = self.sr * x_pad  # padding length before/after each chunk
-         self.t_pad_tgt = tgt_sr * x_pad
-         self.t_pad2 = self.t_pad * 2
-         self.t_query = self.sr * x_query  # search range around each candidate cut point
-         self.t_center = self.sr * x_center  # spacing of candidate cut points
-         self.t_max = self.sr * x_max  # duration threshold below which no cutting is needed
-         self.device = device
-         self.is_half = is_half
-
-     def get_f0(self, x, p_len, f0_up_key, f0_method, inp_f0=None):
-         time_step = self.window / self.sr * 1000
-         f0_min = 50
-         f0_max = 1100
-         f0_mel_min = 1127 * np.log(1 + f0_min / 700)
-         f0_mel_max = 1127 * np.log(1 + f0_max / 700)
-         if f0_method == "pm":
-             f0 = parselmouth.Sound(x, self.sr).to_pitch_ac(
-                 time_step=time_step / 1000, voicing_threshold=0.6,
-                 pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array["frequency"]
-             pad_size = (p_len - len(f0) + 1) // 2
-             if pad_size > 0 or p_len - len(f0) - pad_size > 0:
-                 f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
-         elif f0_method == "harvest":
-             f0, t = pyworld.harvest(
-                 x.astype(np.double),
-                 fs=self.sr,
-                 f0_ceil=f0_max,
-                 f0_floor=f0_min,
-                 frame_period=10,
-             )
-             f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)
-             f0 = signal.medfilt(f0, 3)
-         f0 *= pow(2, f0_up_key / 12)
-         # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
-         tf0 = self.sr // self.window  # f0 points per second
-         if inp_f0 is not None:
-             delta_t = np.round((inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1).astype("int16")
-             replace_f0 = np.interp(list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1])
-             shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0]
-             f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape]
-         # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
-         f0bak = f0.copy()
-         f0_mel = 1127 * np.log(1 + f0 / 700)
-         f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
-         f0_mel[f0_mel <= 1] = 1
-         f0_mel[f0_mel > 255] = 255
-         f0_coarse = np.rint(f0_mel).astype(int)
-         return f0_coarse, f0bak  # 1-0
-
-     def vc(self, model, net_g, sid, audio0, pitch, pitchf, times, index, big_npy, index_rate):  # ,file_index,file_big_npy
-         feats = torch.from_numpy(audio0)
-         if self.is_half:
-             feats = feats.half()
-         else:
-             feats = feats.float()
-         if feats.dim() == 2:  # double channels
-             feats = feats.mean(-1)
-         assert feats.dim() == 1, feats.dim()
-         feats = feats.view(1, -1)
-         padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
-
-         inputs = {
-             "source": feats.to(self.device),
-             "padding_mask": padding_mask,
-             "output_layer": 9,  # layer 9
-         }
-         t0 = ttime()
-         with torch.no_grad():
-             logits = model.extract_features(**inputs)
-             feats = model.final_proj(logits[0])
-
-         if index is not None and big_npy is not None and index_rate != 0:
-             npy = feats[0].cpu().numpy()
-             if self.is_half:
-                 npy = npy.astype("float32")
-             _, I = index.search(npy, 1)
-             npy = big_npy[I.squeeze()]
-             if self.is_half:
-                 npy = npy.astype("float16")
-             feats = torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate + (1 - index_rate) * feats
-
-         feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
-         t1 = ttime()
-         p_len = audio0.shape[0] // self.window
-         if feats.shape[1] < p_len:
-             p_len = feats.shape[1]
-             if pitch is not None and pitchf is not None:
-                 pitch = pitch[:, :p_len]
-                 pitchf = pitchf[:, :p_len]
-         p_len = torch.tensor([p_len], device=self.device).long()
-         with torch.no_grad():
-             if pitch is not None and pitchf is not None:
-                 audio1 = (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] * 32768).data.cpu().float().numpy().astype(np.int16)
-             else:
-                 audio1 = (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768).data.cpu().float().numpy().astype(np.int16)
-         del feats, p_len, padding_mask
-         if torch.cuda.is_available():
-             torch.cuda.empty_cache()
-         t2 = ttime()
-         times[0] += t1 - t0
-         times[2] += t2 - t1
-         return audio1
-
-     def pipeline(self, model, net_g, sid, audio, times, f0_up_key, f0_method, file_index, file_big_npy, index_rate, if_f0, f0_file=None):
-         if (file_big_npy != "" and file_index != ""
-                 and os.path.exists(file_big_npy) and os.path.exists(file_index)
-                 and index_rate != 0):
-             try:
-                 index = faiss.read_index(file_index)
-                 big_npy = np.load(file_big_npy)
-             except:
-                 traceback.print_exc()
-                 index = big_npy = None
-         else:
-             index = big_npy = None
-         audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
-         opt_ts = []
-         if audio_pad.shape[0] > self.t_max:
-             audio_sum = np.zeros_like(audio)
-             for i in range(self.window):
-                 audio_sum += audio_pad[i : i - self.window]
-             for t in range(self.t_center, audio.shape[0], self.t_center):
-                 opt_ts.append(t - self.t_query + np.where(np.abs(audio_sum[t - self.t_query : t + self.t_query]) == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min())[0][0])
-         s = 0
-         audio_opt = []
-         t = None
-         t1 = ttime()
-         audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
-         p_len = audio_pad.shape[0] // self.window
-         inp_f0 = None
-         if hasattr(f0_file, "name"):
-             try:
-                 with open(f0_file.name, "r") as f:
-                     lines = f.read().strip("\n").split("\n")
-                 inp_f0 = []
-                 for line in lines:
-                     inp_f0.append([float(i) for i in line.split(",")])
-                 inp_f0 = np.array(inp_f0, dtype="float32")
-             except:
-                 traceback.print_exc()
-         sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
-         pitch, pitchf = None, None
-         if if_f0 == 1:
-             pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key, f0_method, inp_f0)
-             pitch = pitch[:p_len]
-             pitchf = pitchf[:p_len]
-             pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
-             pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
-         t2 = ttime()
-         times[1] += t2 - t1
-         for t in opt_ts:
-             t = t // self.window * self.window
-             if if_f0 == 1:
-                 audio_opt.append(self.vc(model, net_g, sid, audio_pad[s : t + self.t_pad2 + self.window], pitch[:, s // self.window : (t + self.t_pad2) // self.window], pitchf[:, s // self.window : (t + self.t_pad2) // self.window], times, index, big_npy, index_rate)[self.t_pad_tgt : -self.t_pad_tgt])
-             else:
-                 audio_opt.append(self.vc(model, net_g, sid, audio_pad[s : t + self.t_pad2 + self.window], None, None, times, index, big_npy, index_rate)[self.t_pad_tgt : -self.t_pad_tgt])
-             s = t
-         if if_f0 == 1:
-             audio_opt.append(self.vc(model, net_g, sid, audio_pad[t:], pitch[:, t // self.window :] if t is not None else pitch, pitchf[:, t // self.window :] if t is not None else pitchf, times, index, big_npy, index_rate)[self.t_pad_tgt : -self.t_pad_tgt])
-         else:
-             audio_opt.append(self.vc(model, net_g, sid, audio_pad[t:], None, None, times, index, big_npy, index_rate)[self.t_pad_tgt : -self.t_pad_tgt])
-         audio_opt = np.concatenate(audio_opt)
-         del pitch, pitchf, sid
-         if torch.cuda.is_available():
-             torch.cuda.empty_cache()
-         return audio_opt
+ import numpy as np, parselmouth, torch, pdb
+ from time import time as ttime
+ import torch.nn.functional as F
+ import pyworld, os, traceback, faiss, librosa
+ from scipy import signal
+ from functools import lru_cache
+
+ # 5th-order Butterworth high-pass at 48 Hz, applied to the input in pipeline()
+ bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
+
+ # maps audio path -> float64 waveform, consumed by cache_harvest_f0
+ input_audio_path2wav = {}
+
+ @lru_cache
+ def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period):
+     audio = input_audio_path2wav[input_audio_path]
+     f0, t = pyworld.harvest(
+         audio,
+         fs=fs,
+         f0_ceil=f0max,
+         f0_floor=f0min,
+         frame_period=frame_period,
+     )
+     f0 = pyworld.stonemask(audio, f0, t, fs)
+     return f0
+
+ def change_rms(data1, sr1, data2, sr2, rate):  # data1: input audio, data2: output audio, rate: weight of data2's envelope
+     # print(data1.max(), data2.max())
+     rms1 = librosa.feature.rms(y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2)  # one RMS point every half second
+     rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2)
+     rms1 = torch.from_numpy(rms1)
+     rms1 = F.interpolate(rms1.unsqueeze(0), size=data2.shape[0], mode="linear").squeeze()
+     rms2 = torch.from_numpy(rms2)
+     rms2 = F.interpolate(rms2.unsqueeze(0), size=data2.shape[0], mode="linear").squeeze()
+     rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6)
+     data2 *= (torch.pow(rms1, torch.tensor(1 - rate)) * torch.pow(rms2, torch.tensor(rate - 1))).numpy()
+     return data2
+
+ class VC(object):
+     def __init__(self, tgt_sr, config):
+         self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (
+             config.x_pad,
+             config.x_query,
+             config.x_center,
+             config.x_max,
+             config.is_half,
+         )
+         self.sr = 16000  # hubert input sample rate
+         self.window = 160  # samples per frame
+         self.t_pad = self.sr * self.x_pad  # padding length before/after each chunk
+         self.t_pad_tgt = tgt_sr * self.x_pad
+         self.t_pad2 = self.t_pad * 2
+         self.t_query = self.sr * self.x_query  # search range around each candidate cut point
+         self.t_center = self.sr * self.x_center  # spacing of candidate cut points
+         self.t_max = self.sr * self.x_max  # duration threshold below which no cutting is needed
+         self.device = config.device
+
+     def get_f0(self, input_audio_path, x, p_len, f0_up_key, f0_method, filter_radius, inp_f0=None):
+         global input_audio_path2wav
+         time_step = self.window / self.sr * 1000
+         f0_min = 50
+         f0_max = 1100
+         f0_mel_min = 1127 * np.log(1 + f0_min / 700)
+         f0_mel_max = 1127 * np.log(1 + f0_max / 700)
+         if f0_method == "pm":
+             f0 = (
+                 parselmouth.Sound(x, self.sr)
+                 .to_pitch_ac(
+                     time_step=time_step / 1000,
+                     voicing_threshold=0.6,
+                     pitch_floor=f0_min,
+                     pitch_ceiling=f0_max,
+                 )
+                 .selected_array["frequency"]
+             )
+             pad_size = (p_len - len(f0) + 1) // 2
+             if pad_size > 0 or p_len - len(f0) - pad_size > 0:
+                 f0 = np.pad(
+                     f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
+                 )
+         elif f0_method == "harvest":
+             input_audio_path2wav[input_audio_path] = x.astype(np.double)
+             f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)
+             if filter_radius > 2:
+                 f0 = signal.medfilt(f0, 3)
+         f0 *= pow(2, f0_up_key / 12)
+         # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
+         tf0 = self.sr // self.window  # f0 points per second
+         if inp_f0 is not None:
+             delta_t = np.round(
+                 (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
+             ).astype("int16")
+             replace_f0 = np.interp(
+                 list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
+             )
+             shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]
+             f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[
+                 :shape
+             ]
+         # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
+         f0bak = f0.copy()
+         f0_mel = 1127 * np.log(1 + f0 / 700)
+         f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
+             f0_mel_max - f0_mel_min
+         ) + 1
+         f0_mel[f0_mel <= 1] = 1
+         f0_mel[f0_mel > 255] = 255
+         f0_coarse = np.rint(f0_mel).astype(int)
+         return f0_coarse, f0bak  # 1-0
+
+     def vc(
+         self,
+         model,
+         net_g,
+         sid,
+         audio0,
+         pitch,
+         pitchf,
+         times,
+         index,
+         big_npy,
+         index_rate,
+         version,
+     ):  # ,file_index,file_big_npy
+         feats = torch.from_numpy(audio0)
+         if self.is_half:
+             feats = feats.half()
+         else:
+             feats = feats.float()
+         if feats.dim() == 2:  # double channels
+             feats = feats.mean(-1)
+         assert feats.dim() == 1, feats.dim()
+         feats = feats.view(1, -1)
+         padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
+
+         inputs = {
+             "source": feats.to(self.device),
+             "padding_mask": padding_mask,
+             "output_layer": 9 if version == "v1" else 12,
+         }
+         t0 = ttime()
+         with torch.no_grad():
+             logits = model.extract_features(**inputs)
+             feats = model.final_proj(logits[0]) if version == "v1" else logits[0]
+
+         if index is not None and big_npy is not None and index_rate != 0:
+             npy = feats[0].cpu().numpy()
+             if self.is_half:
+                 npy = npy.astype("float32")
+
+             # _, I = index.search(npy, 1)
+             # npy = big_npy[I.squeeze()]
+
+             score, ix = index.search(npy, k=8)
+             weight = np.square(1 / score)
+             weight /= weight.sum(axis=1, keepdims=True)
+             npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
+
+             if self.is_half:
+                 npy = npy.astype("float16")
+             feats = (
+                 torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
+                 + (1 - index_rate) * feats
+             )
+
+         feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
+         t1 = ttime()
+         p_len = audio0.shape[0] // self.window
+         if feats.shape[1] < p_len:
+             p_len = feats.shape[1]
+             if pitch is not None and pitchf is not None:
+                 pitch = pitch[:, :p_len]
+                 pitchf = pitchf[:, :p_len]
+         p_len = torch.tensor([p_len], device=self.device).long()
+         with torch.no_grad():
+             if pitch is not None and pitchf is not None:
+                 audio1 = (
+                     (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0])
+                     .data.cpu()
+                     .float()
+                     .numpy()
+                 )
+             else:
+                 audio1 = (
+                     (net_g.infer(feats, p_len, sid)[0][0, 0])
+                     .data.cpu()
+                     .float()
+                     .numpy()
+                 )
+         del feats, p_len, padding_mask
+         if torch.cuda.is_available():
+             torch.cuda.empty_cache()
+         t2 = ttime()
+         times[0] += t1 - t0
+         times[2] += t2 - t1
+         return audio1
+
+     def pipeline(
+         self,
+         model,
+         net_g,
+         sid,
+         audio,
+         input_audio_path,
+         times,
+         f0_up_key,
+         f0_method,
+         file_index,
+         # file_big_npy,
+         index_rate,
+         if_f0,
+         filter_radius,
+         tgt_sr,
+         resample_sr,
+         rms_mix_rate,
+         version,
+         f0_file=None,
+     ):
+         if (
+             file_index != ""
+             # and file_big_npy != ""
+             # and os.path.exists(file_big_npy)
+             and os.path.exists(file_index)
+             and index_rate != 0
+         ):
+             try:
+                 index = faiss.read_index(file_index)
+                 # big_npy = np.load(file_big_npy)
+                 big_npy = index.reconstruct_n(0, index.ntotal)
+             except:
+                 traceback.print_exc()
+                 index = big_npy = None
+         else:
+             index = big_npy = None
+         audio = signal.filtfilt(bh, ah, audio)  # high-pass the input before conversion
+         audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
+         opt_ts = []
+         if audio_pad.shape[0] > self.t_max:
+             audio_sum = np.zeros_like(audio)
+             for i in range(self.window):
+                 audio_sum += audio_pad[i : i - self.window]
+             for t in range(self.t_center, audio.shape[0], self.t_center):
+                 opt_ts.append(
+                     t
+                     - self.t_query
+                     + np.where(
+                         np.abs(audio_sum[t - self.t_query : t + self.t_query])
+                         == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
+                     )[0][0]
+                 )
+         s = 0
+         audio_opt = []
+         t = None
+         t1 = ttime()
+         audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
+         p_len = audio_pad.shape[0] // self.window
+         inp_f0 = None
+         if hasattr(f0_file, "name"):
+             try:
+                 with open(f0_file.name, "r") as f:
+                     lines = f.read().strip("\n").split("\n")
+                 inp_f0 = []
+                 for line in lines:
+                     inp_f0.append([float(i) for i in line.split(",")])
+                 inp_f0 = np.array(inp_f0, dtype="float32")
+             except:
+                 traceback.print_exc()
+         sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
+         pitch, pitchf = None, None
+         if if_f0 == 1:
+             pitch, pitchf = self.get_f0(
+                 input_audio_path,
+                 audio_pad,
+                 p_len,
+                 f0_up_key,
+                 f0_method,
+                 filter_radius,
+                 inp_f0,
+             )
+             pitch = pitch[:p_len]
+             pitchf = pitchf[:p_len]
+             if self.device == "mps":
+                 pitchf = pitchf.astype(np.float32)
+             pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
+             pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
+         t2 = ttime()
+         times[1] += t2 - t1
+         for t in opt_ts:
+             t = t // self.window * self.window
+             if if_f0 == 1:
+                 audio_opt.append(
+                     self.vc(
+                         model,
+                         net_g,
+                         sid,
+                         audio_pad[s : t + self.t_pad2 + self.window],
+                         pitch[:, s // self.window : (t + self.t_pad2) // self.window],
+                         pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
+                         times,
+                         index,
+                         big_npy,
+                         index_rate,
+                         version,
+                     )[self.t_pad_tgt : -self.t_pad_tgt]
+                 )
+             else:
+                 audio_opt.append(
+                     self.vc(
+                         model,
+                         net_g,
+                         sid,
+                         audio_pad[s : t + self.t_pad2 + self.window],
+                         None,
+                         None,
+                         times,
+                         index,
+                         big_npy,
+                         index_rate,
+                         version,
+                     )[self.t_pad_tgt : -self.t_pad_tgt]
+                 )
+             s = t
+         if if_f0 == 1:
+             audio_opt.append(
+                 self.vc(
+                     model,
+                     net_g,
+                     sid,
+                     audio_pad[t:],
+                     pitch[:, t // self.window :] if t is not None else pitch,
+                     pitchf[:, t // self.window :] if t is not None else pitchf,
+                     times,
+                     index,
+                     big_npy,
+                     index_rate,
+                     version,
+                 )[self.t_pad_tgt : -self.t_pad_tgt]
+             )
+         else:
+             audio_opt.append(
+                 self.vc(
+                     model,
+                     net_g,
+                     sid,
+                     audio_pad[t:],
+                     None,
+                     None,
+                     times,
+                     index,
+                     big_npy,
+                     index_rate,
+                     version,
+                 )[self.t_pad_tgt : -self.t_pad_tgt]
+             )
+         audio_opt = np.concatenate(audio_opt)
+         if rms_mix_rate != 1:
+             audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)
+         if resample_sr >= 16000 and tgt_sr != resample_sr:
+             audio_opt = librosa.resample(
+                 audio_opt, orig_sr=tgt_sr, target_sr=resample_sr
+             )
+         audio_max = np.abs(audio_opt).max() / 0.99
+         max_int16 = 32768
+         if audio_max > 1:
+             max_int16 /= audio_max
+         audio_opt = (audio_opt * max_int16).astype(np.int16)
+         del pitch, pitchf, sid
+         if torch.cuda.is_available():
+             torch.cuda.empty_cache()
+         return audio_opt
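
Two behavioural notes on the new version. First, `vc()` no longer copies the single nearest training feature from the index; it blends the eight nearest neighbours, weighted by inverse squared distance, which smooths frame-to-frame jumps in the retrieved features. A self-contained sketch of the old versus new lookup (toy dimensions and random vectors, illustrative only):

```python
import numpy as np
import faiss

d = 256                                              # toy feature dimension
big_npy = np.random.rand(1000, d).astype("float32")  # stands in for the indexed training features
index = faiss.IndexFlatL2(d)
index.add(big_npy)

npy = np.random.rand(10, d).astype("float32")        # stands in for the hubert frames

# Old lookup: hard nearest-neighbour replacement.
_, I = index.search(npy, 1)
npy_old = big_npy[I.squeeze()]

# New lookup: average the 8 nearest entries, weighted by inverse squared
# distance (IndexFlatL2 returns squared L2 distances in `score`).
score, ix = index.search(npy, k=8)
weight = np.square(1 / score)
weight /= weight.sum(axis=1, keepdims=True)
npy_new = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
```

Second, `vc()` now returns float audio; the conversion to int16 (with peak normalization at 0.99) happens once at the end of `pipeline()`, after the optional `change_rms` envelope mix and resampling, instead of per segment.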