bodhisativa committed
Commit 2e639e9 · verified · 1 Parent(s): 902503d

Mirror lj1995/VoiceConversionWebUI @ b2c8cae96e3b — vc_infer_pipeline.py

Files changed (1)
  1. vc_infer_pipeline.py +225 -0
vc_infer_pipeline.py ADDED
@@ -0,0 +1,225 @@
+ import traceback
+ from time import time as ttime
+ 
+ import numpy as np
+ import parselmouth
+ import torch
+ import torch.nn.functional as F
+ from sklearn.cluster import KMeans
+ 
+ from config import x_pad, x_query, x_center, x_max
+ 
+ def resize2d(x, target_len, is1):
+     minn = 1 if is1 else 0
+     ss = np.array(x).astype("float32")
+     ss[ss <= minn] = np.nan
+     target = np.interp(np.arange(0, len(ss) * target_len, len(ss)) / target_len, np.arange(0, len(ss)), ss)
+     res = np.nan_to_num(target)
+     return res
+ 
+ class VC(object):
+     def __init__(self, tgt_sr, device, is_half):
+         self.sr = 16000  # HuBERT input sample rate
+         self.window = 160  # samples per frame (10 ms hop)
+         self.t_pad = self.sr * x_pad  # padding added before and after each segment
+         self.t_pad_tgt = tgt_sr * x_pad
+         self.t_pad2 = self.t_pad * 2
+         self.t_query = self.sr * x_query  # search radius around each candidate cut point
+         self.t_center = self.sr * x_center  # spacing between candidate cut points
+         self.t_max = self.sr * x_max  # duration threshold below which no cut-point search is done
+         self.device = device
+         self.is_half = is_half
+ 
+     def get_f0(self, x, p_len, f0_up_key=0, inp_f0=None):
+         time_step = self.window / self.sr * 1000
+         f0_min = 50
+         f0_max = 1100
+         f0_mel_min = 1127 * np.log(1 + f0_min / 700)
+         f0_mel_max = 1127 * np.log(1 + f0_max / 700)
+         # Praat autocorrelation pitch tracking via parselmouth.
+         f0 = parselmouth.Sound(x, self.sr).to_pitch_ac(
+             time_step=time_step / 1000, voicing_threshold=0.6,
+             pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']
+         # Pad the f0 track symmetrically so it covers all p_len frames.
+         pad_size = (p_len - len(f0) + 1) // 2
+         if pad_size > 0 or p_len - len(f0) - pad_size > 0:
+             f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode='constant')
+         f0 *= pow(2, f0_up_key / 12)  # transpose by f0_up_key semitones
+         # with open("test.txt", "w") as f: f.write("\n".join([str(i) for i in f0.tolist()]))
+         tf0 = self.sr // self.window  # f0 frames per second
+         if inp_f0 is not None:
+             # Overwrite the extracted curve with a user-supplied f0 file
+             # (column 0: time in seconds, column 1: f0 in Hz).
+             delta_t = np.round((inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1).astype("int16")
+             replace_f0 = np.interp(list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1])
+             shape = f0[x_pad * tf0:x_pad * tf0 + len(replace_f0)].shape[0]
+             f0[x_pad * tf0:x_pad * tf0 + len(replace_f0)] = replace_f0[:shape]
+         # with open("test_opt.txt", "w") as f: f.write("\n".join([str(i) for i in f0.tolist()]))
+         f0bak = f0.copy()
+         # Quantize to 255 coarse mel-scale bins; unvoiced frames map to 1.
+         f0_mel = 1127 * np.log(1 + f0 / 700)
+         f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
+         f0_mel[f0_mel <= 1] = 1
+         f0_mel[f0_mel > 255] = 255
+         f0_coarse = np.rint(f0_mel).astype(int)
+         return f0_coarse, f0bak  # coarse bins (1-255) and raw f0 in Hz
+ 
+     def vc(self, model, net_g, dv, audio0, pitch, pitchf, times):
+         feats = torch.from_numpy(audio0)
+         feats = feats.half() if self.is_half else feats.float()
+         if feats.dim() == 2:  # stereo: average the two channels
+             feats = feats.mean(-1)
+         assert feats.dim() == 1, feats.dim()
+         feats = feats.view(1, -1)
+         padding_mask = torch.BoolTensor(feats.shape).fill_(False)
+ 
+         inputs = {
+             "source": feats.to(self.device),
+             "padding_mask": padding_mask.to(self.device),
+             "output_layer": 9,  # layer 9
+         }
+         t0 = ttime()
+         with torch.no_grad():
+             logits = model.extract_features(**inputs)
+             feats = model.final_proj(logits[0])
+         # Upsample the 50 Hz content features 2x to match the 100 Hz f0 frames.
+         feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
+         t1 = ttime()
+         p_len = audio0.shape[0] // self.window
+         if feats.shape[1] < p_len:
+             p_len = feats.shape[1]
+             pitch = pitch[:, :p_len]
+             pitchf = pitchf[:, :p_len]
+         p_len = torch.LongTensor([p_len]).to(self.device)
+         with torch.no_grad():
+             audio1 = (net_g.infer(feats, p_len, pitch, pitchf, dv)[0][0, 0] * 32768).data.cpu().float().numpy().astype(np.int16)
+         del feats, p_len, padding_mask
+         torch.cuda.empty_cache()
+         t2 = ttime()
+         times[0] += (t1 - t0)
+         times[2] += (t2 - t1)
+         return audio1
+ 
+     def vc_km(self, model, net_g, dv, audio0, pitch, pitchf, times):
+         # Variant of vc() that quantizes the content features with a
+         # pre-trained 500-cluster k-means codebook before synthesis.
+         kmeans = KMeans(500)
+ 
+         def get_cluster_result(x):
+             """x: np.array [t, 256]"""
+             return kmeans.predict(x)
+ 
+         # Restore the fitted k-means state without re-training.
+         checkpoint = torch.load("lulu_contentvec_kmeans_500.pt")
+         kmeans.__dict__["n_features_in_"] = checkpoint["n_features_in_"]
+         kmeans.__dict__["_n_threads"] = checkpoint["_n_threads"]
+         kmeans.__dict__["cluster_centers_"] = checkpoint["cluster_centers_"]
+         feats = torch.from_numpy(audio0).float()
+         if feats.dim() == 2:  # stereo: average the two channels
+             feats = feats.mean(-1)
+         assert feats.dim() == 1, feats.dim()
+         feats = feats.view(1, -1)
+         padding_mask = torch.BoolTensor(feats.shape).fill_(False)
+         inputs = {
+             "source": feats.half().to(self.device),
+             "padding_mask": padding_mask.to(self.device),
+             "output_layer": 9,  # layer 9
+         }
+         torch.cuda.synchronize()
+         t0 = ttime()
+         with torch.no_grad():
+             logits = model.extract_features(**inputs)
+             feats = model.final_proj(logits[0])
+         # Replace each frame with its nearest cluster id, then upsample 2x.
+         feats = get_cluster_result(feats.cpu().numpy()[0].astype("float32"))
+         feats = torch.from_numpy(feats).to(self.device)
+         feats = F.interpolate(feats.half().unsqueeze(0).unsqueeze(0), scale_factor=2).long().squeeze(0)
+         t1 = ttime()
+         p_len = audio0.shape[0] // self.window
+         if feats.shape[1] < p_len:
+             p_len = feats.shape[1]
+             pitch = pitch[:, :p_len]
+             pitchf = pitchf[:, :p_len]
+         p_len = torch.LongTensor([p_len]).to(self.device)
+         with torch.no_grad():
+             audio1 = (net_g.infer(feats, p_len, pitch, pitchf, dv)[0][0, 0] * 32768).data.cpu().float().numpy().astype(np.int16)
+         del feats, p_len, padding_mask
+         torch.cuda.empty_cache()
+         t2 = ttime()
+         times[0] += (t1 - t0)
+         times[2] += (t2 - t1)
+         return audio1
+ 
+     def pipeline(self, model, net_g, dv, audio, times, f0_up_key, f0_file=None):
+         audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode='reflect')
+         opt_ts = []
+         if audio_pad.shape[0] > self.t_max:
+             # Long input: around every t_center mark, cut at the sample where the
+             # windowed amplitude sum is smallest, i.e. at the quietest frame.
+             audio_sum = np.zeros_like(audio)
+             for i in range(self.window):
+                 audio_sum += audio_pad[i:i - self.window]
+             for t in range(self.t_center, audio.shape[0], self.t_center):
+                 window_abs = np.abs(audio_sum[t - self.t_query:t + self.t_query])
+                 opt_ts.append(t - self.t_query + np.where(window_abs == window_abs.min())[0][0])
+         s = 0
+         audio_opt = []
+         t = None
+         t1 = ttime()
+         audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode='reflect')
+         p_len = audio_pad.shape[0] // self.window
+         inp_f0 = None
+         if hasattr(f0_file, 'name'):
+             # Optional user-supplied f0 file: one "time,f0" pair per line.
+             try:
+                 with open(f0_file.name, "r") as f:
+                     lines = f.read().strip("\n").split("\n")
+                 inp_f0 = []
+                 for line in lines:
+                     inp_f0.append([float(i) for i in line.split(",")])
+                 inp_f0 = np.array(inp_f0, dtype="float32")
+             except Exception:
+                 traceback.print_exc()
+         pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key, inp_f0)
+         pitch = pitch[:p_len]
+         pitchf = pitchf[:p_len]
+         # if inp_f0 is None:
+         #     pitch = pitch[:p_len]
+         #     pitchf = pitchf[:p_len]
+         # else:
+         #     pitch = resize2d(pitch, p_len, is1=True)
+         #     pitchf = resize2d(pitchf, p_len, is1=False)
+         pitch = torch.LongTensor(pitch).unsqueeze(0).to(self.device)
+         pitchf = torch.FloatTensor(pitchf).unsqueeze(0).to(self.device)
+         t2 = ttime()
+         times[1] += (t2 - t1)
+         for t in opt_ts:
+             t = t // self.window * self.window  # align the cut to a frame boundary
+             # Convert each chunk with t_pad context on both sides, then trim the pads.
+             audio_opt.append(self.vc(
+                 model, net_g, dv,
+                 audio_pad[s:t + self.t_pad2 + self.window],
+                 pitch[:, s // self.window:(t + self.t_pad2) // self.window],
+                 pitchf[:, s // self.window:(t + self.t_pad2) // self.window],
+                 times)[self.t_pad_tgt:-self.t_pad_tgt])
+             s = t
+         audio_opt.append(self.vc(
+             model, net_g, dv,
+             audio_pad[t:],
+             pitch[:, t // self.window:] if t is not None else pitch,
+             pitchf[:, t // self.window:] if t is not None else pitchf,
+             times)[self.t_pad_tgt:-self.t_pad_tgt])
+         audio_opt = np.concatenate(audio_opt)
+         del pitch, pitchf
+         return audio_opt
+ 
+     def pipeline_km(self, model, net_g, dv, audio, times, f0_up_key, f0_file=None):
+         # Same as pipeline() but synthesizes through the k-means path (vc_km).
+         audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode='reflect')
+         opt_ts = []
+         if audio_pad.shape[0] > self.t_max:
+             # Long input: around every t_center mark, cut at the sample where the
+             # windowed amplitude sum is smallest, i.e. at the quietest frame.
+             audio_sum = np.zeros_like(audio)
+             for i in range(self.window):
+                 audio_sum += audio_pad[i:i - self.window]
+             for t in range(self.t_center, audio.shape[0], self.t_center):
+                 window_abs = np.abs(audio_sum[t - self.t_query:t + self.t_query])
+                 opt_ts.append(t - self.t_query + np.where(window_abs == window_abs.min())[0][0])
+         s = 0
+         audio_opt = []
+         t = None
+         t1 = ttime()
+         audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode='reflect')
+         p_len = audio_pad.shape[0] // self.window
+         inp_f0 = None
+         if hasattr(f0_file, 'name'):
+             # Optional user-supplied f0 file: one "time,f0" pair per line.
+             try:
+                 with open(f0_file.name, "r") as f:
+                     lines = f.read().strip("\n").split("\n")
+                 inp_f0 = []
+                 for line in lines:
+                     inp_f0.append([float(i) for i in line.split(",")])
+                 inp_f0 = np.array(inp_f0, dtype="float32")
+             except Exception:
+                 traceback.print_exc()
+         pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key, inp_f0)
+         pitch = pitch[:p_len]
+         pitchf = pitchf[:p_len]
+         # if inp_f0 is None:
+         #     pitch = pitch[:p_len]
+         #     pitchf = pitchf[:p_len]
+         # else:
+         #     pitch = resize2d(pitch, p_len, is1=True)
+         #     pitchf = resize2d(pitchf, p_len, is1=False)
+         pitch = torch.LongTensor(pitch).unsqueeze(0).to(self.device)
+         pitchf = torch.FloatTensor(pitchf).unsqueeze(0).to(self.device)
+         t2 = ttime()
+         times[1] += (t2 - t1)
+         for t in opt_ts:
+             t = t // self.window * self.window  # align the cut to a frame boundary
+             # Convert each chunk with t_pad context on both sides, then trim the pads.
+             audio_opt.append(self.vc_km(
+                 model, net_g, dv,
+                 audio_pad[s:t + self.t_pad2 + self.window],
+                 pitch[:, s // self.window:(t + self.t_pad2) // self.window],
+                 pitchf[:, s // self.window:(t + self.t_pad2) // self.window],
+                 times)[self.t_pad_tgt:-self.t_pad_tgt])
+             s = t
+         audio_opt.append(self.vc_km(
+             model, net_g, dv,
+             audio_pad[t:],
+             pitch[:, t // self.window:] if t is not None else pitch,
+             pitchf[:, t // self.window:] if t is not None else pitchf,
+             times)[self.t_pad_tgt:-self.t_pad_tgt])
+         audio_opt = np.concatenate(audio_opt)
+         del pitch, pitchf
+         return audio_opt
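
As a sanity check on the coarse-f0 mapping in get_f0, the snippet below reproduces the same mel-scale quantization for a few frequencies. It is a standalone sketch using only the constants defined inside the method; the endpoints land on bins 1 and 255 by construction, and unvoiced frames (f0 = 0) also map to bin 1.

import numpy as np

f0_min, f0_max = 50, 1100
f0_mel_min = 1127 * np.log(1 + f0_min / 700)  # ~77.8
f0_mel_max = 1127 * np.log(1 + f0_max / 700)  # ~1064.4

def coarse(f0_hz):
    # Same mapping as get_f0: mel scale, then affine squeeze into bins 1..255.
    f0_mel = 1127 * np.log(1 + f0_hz / 700)
    bins = (f0_mel - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
    return int(np.rint(np.clip(bins, 1, 255)))

print(coarse(0), coarse(50), coarse(220), coarse(1100))  # 1 1 60 255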
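
For orientation, a minimal usage sketch follows. It assumes names that are not defined in this file: hubert_model (a ContentVec/HuBERT encoder exposing extract_features and final_proj), net_g (the trained synthesizer with an infer method), dv (the target speaker embedding), and tgt_sr (the target sample rate), all loaded elsewhere in the WebUI; config.py must also provide x_pad, x_query, x_center, and x_max.

import librosa
import soundfile as sf
from vc_infer_pipeline import VC

# Pipeline input is 16 kHz mono float32; output is int16 at tgt_sr.
audio, _ = librosa.load("input.wav", sr=16000)
vc = VC(tgt_sr, device="cuda", is_half=True)
times = [0, 0, 0]  # accumulates [feature-extraction, f0, synthesis] seconds
audio_out = vc.pipeline(hubert_model, net_g, dv, audio, times, f0_up_key=0)
sf.write("output.wav", audio_out, tgt_sr)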