Plachta committed on
Commit
35aaf1e
1 Parent(s): f24a851

Upload 35 files

ONNXVITS_infer.py ADDED
@@ -0,0 +1,93 @@
+ import torch
+ import commons
+ import models
+ class SynthesizerTrn(models.SynthesizerTrn):
+     """
+     Synthesizer for Training
+     """
+
+     def __init__(self,
+         n_vocab,
+         spec_channels,
+         segment_size,
+         inter_channels,
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size,
+         p_dropout,
+         resblock,
+         resblock_kernel_sizes,
+         resblock_dilation_sizes,
+         upsample_rates,
+         upsample_initial_channel,
+         upsample_kernel_sizes,
+         n_speakers=0,
+         gin_channels=0,
+         use_sdp=True,
+         **kwargs):
+
+         super().__init__(
+             n_vocab,
+             spec_channels,
+             segment_size,
+             inter_channels,
+             hidden_channels,
+             filter_channels,
+             n_heads,
+             n_layers,
+             kernel_size,
+             p_dropout,
+             resblock,
+             resblock_kernel_sizes,
+             resblock_dilation_sizes,
+             upsample_rates,
+             upsample_initial_channel,
+             upsample_kernel_sizes,
+             n_speakers=n_speakers,
+             gin_channels=gin_channels,
+             use_sdp=use_sdp,
+             **kwargs
+         )
+
+     def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
+         from ONNXVITS_utils import runonnx
+
+         # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
+         x, m_p, logs_p, x_mask = runonnx("ONNX_net/enc_p.onnx", x=x.numpy(), x_lengths=x_lengths.numpy())
+         x = torch.from_numpy(x)
+         m_p = torch.from_numpy(m_p)
+         logs_p = torch.from_numpy(logs_p)
+         x_mask = torch.from_numpy(x_mask)
+
+         if self.n_speakers > 0:
+             g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
+         else:
+             g = None
+
+         # logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
+         logw = runonnx("ONNX_net/dp.onnx", x=x.numpy(), x_mask=x_mask.numpy(), g=g.numpy())
+         logw = torch.from_numpy(logw[0])
+
+         w = torch.exp(logw) * x_mask * length_scale
+         w_ceil = torch.ceil(w)
+         y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
+         y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
+         attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
+         attn = commons.generate_path(w_ceil, attn_mask)
+
+         m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']
+         logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']
+
+         z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
+
+         # z = self.flow(z_p, y_mask, g=g, reverse=True)
+         z = runonnx("ONNX_net/flow.onnx", z_p=z_p.numpy(), y_mask=y_mask.numpy(), g=g.numpy())
+         z = torch.from_numpy(z[0])
+
+         # o = self.dec((z * y_mask)[:,:,:max_len], g=g)
+         o = runonnx("ONNX_net/dec.onnx", z_in=(z * y_mask)[:,:,:max_len].numpy(), g=g.numpy())
+         o = torch.from_numpy(o[0])
+
+         return o, attn, y_mask, (z, z_p, m_p, logs_p)
ONNXVITS_inference.py ADDED
@@ -0,0 +1,36 @@
+ import logging
+ logging.getLogger('numba').setLevel(logging.WARNING)
+ import IPython.display as ipd
+ import torch
+ import commons
+ import utils
+ import ONNXVITS_infer
+ from text import text_to_sequence
+
+ def get_text(text, hps):
+     text_norm = text_to_sequence(text, hps.symbols, hps.data.text_cleaners)
+     if hps.data.add_blank:
+         text_norm = commons.intersperse(text_norm, 0)
+     text_norm = torch.LongTensor(text_norm)
+     return text_norm
+
+ hps = utils.get_hparams_from_file("../vits/pretrained_models/uma87.json")
+
+ net_g = ONNXVITS_infer.SynthesizerTrn(
+     len(hps.symbols),
+     hps.data.filter_length // 2 + 1,
+     hps.train.segment_size // hps.data.hop_length,
+     n_speakers=hps.data.n_speakers,
+     **hps.model)
+ _ = net_g.eval()
+
+ _ = utils.load_checkpoint("../vits/pretrained_models/uma_1153000.pth", net_g)
+
+ text1 = get_text("おはようございます。", hps)
+ stn_tst = text1
+ with torch.no_grad():
+     x_tst = stn_tst.unsqueeze(0)
+     x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
+     sid = torch.LongTensor([0])
+     audio = net_g.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, length_scale=1)[0][0,0].data.cpu().float().numpy()
+ print(audio)
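Note: the script above ends by printing the raw float32 waveform array. A minimal follow-up sketch for listening to or saving it (assuming the sampling rate is stored at hps.data.sampling_rate, as in the standard VITS configs; ipd is already imported by the script):

    import scipy.io.wavfile

    ipd.display(ipd.Audio(audio, rate=hps.data.sampling_rate))           # play inline in a notebook
    scipy.io.wavfile.write("output.wav", hps.data.sampling_rate, audio)  # or write a WAV file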
ONNXVITS_models.py ADDED
@@ -0,0 +1,509 @@
1
+ import copy
2
+ import math
3
+ import torch
4
+ from torch import nn
5
+ from torch.nn import functional as F
6
+
7
+ import commons
8
+ import ONNXVITS_modules as modules
9
+ import attentions
10
+ import monotonic_align
11
+
12
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
13
+ from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
14
+ from commons import init_weights, get_padding
15
+
16
+
17
+ class StochasticDurationPredictor(nn.Module):
18
+ def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
19
+ super().__init__()
20
+ filter_channels = in_channels # it needs to be removed in a future version.
21
+ self.in_channels = in_channels
22
+ self.filter_channels = filter_channels
23
+ self.kernel_size = kernel_size
24
+ self.p_dropout = p_dropout
25
+ self.n_flows = n_flows
26
+ self.gin_channels = gin_channels
27
+
28
+ self.log_flow = modules.Log()
29
+ self.flows = nn.ModuleList()
30
+ self.flows.append(modules.ElementwiseAffine(2))
31
+ for i in range(n_flows):
32
+ self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
33
+ self.flows.append(modules.Flip())
34
+
35
+ self.post_pre = nn.Conv1d(1, filter_channels, 1)
36
+ self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
37
+ self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
38
+ self.post_flows = nn.ModuleList()
39
+ self.post_flows.append(modules.ElementwiseAffine(2))
40
+ for i in range(4):
41
+ self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
42
+ self.post_flows.append(modules.Flip())
43
+
44
+ self.pre = nn.Conv1d(in_channels, filter_channels, 1)
45
+ self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
46
+ self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
47
+ if gin_channels != 0:
48
+ self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
49
+
50
+ self.w = None
51
+ self.reverse = None
52
+ self.noise_scale = None
53
+ def forward(self, x, x_mask, g=None):
54
+ w = self.w
55
+ reverse = self.reverse
56
+ noise_scale = self.noise_scale
57
+
58
+ x = torch.detach(x)
59
+ x = self.pre(x)
60
+ if g is not None:
61
+ g = torch.detach(g)
62
+ x = x + self.cond(g)
63
+ x = self.convs(x, x_mask)
64
+ x = self.proj(x) * x_mask
65
+
66
+ if not reverse:
67
+ flows = self.flows
68
+ assert w is not None
69
+
70
+ logdet_tot_q = 0
71
+ h_w = self.post_pre(w)
72
+ h_w = self.post_convs(h_w, x_mask)
73
+ h_w = self.post_proj(h_w) * x_mask
74
+ e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
75
+ z_q = e_q
76
+ for flow in self.post_flows:
77
+ z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
78
+ logdet_tot_q += logdet_q
79
+ z_u, z1 = torch.split(z_q, [1, 1], 1)
80
+ u = torch.sigmoid(z_u) * x_mask
81
+ z0 = (w - u) * x_mask
82
+ logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
83
+ logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
84
+
85
+ logdet_tot = 0
86
+ z0, logdet = self.log_flow(z0, x_mask)
87
+ logdet_tot += logdet
88
+ z = torch.cat([z0, z1], 1)
89
+ for flow in flows:
90
+ z, logdet = flow(z, x_mask, g=x, reverse=reverse)
91
+ logdet_tot = logdet_tot + logdet
92
+ nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
93
+ return nll + logq # [b]
94
+ else:
95
+ flows = list(reversed(self.flows))
96
+ flows = flows[:-2] + [flows[-1]] # remove a useless vflow
97
+ z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
98
+ for flow in flows:
99
+ z = flow(z, x_mask, g=x, reverse=reverse)
100
+ z0, z1 = torch.split(z, [1, 1], 1)
101
+ logw = z0
102
+ return logw
103
+
104
+
105
+ class TextEncoder(nn.Module):
106
+ def __init__(self,
107
+ n_vocab,
108
+ out_channels,
109
+ hidden_channels,
110
+ filter_channels,
111
+ n_heads,
112
+ n_layers,
113
+ kernel_size,
114
+ p_dropout):
115
+ super().__init__()
116
+ self.n_vocab = n_vocab
117
+ self.out_channels = out_channels
118
+ self.hidden_channels = hidden_channels
119
+ self.filter_channels = filter_channels
120
+ self.n_heads = n_heads
121
+ self.n_layers = n_layers
122
+ self.kernel_size = kernel_size
123
+ self.p_dropout = p_dropout
124
+
125
+ self.emb = nn.Embedding(n_vocab, hidden_channels)
126
+ nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
127
+
128
+ self.encoder = attentions.Encoder(
129
+ hidden_channels,
130
+ filter_channels,
131
+ n_heads,
132
+ n_layers,
133
+ kernel_size,
134
+ p_dropout)
135
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
136
+
137
+ def forward(self, x, x_lengths):
138
+ x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
139
+ x = torch.transpose(x, 1, -1) # [b, h, t]
140
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
141
+
142
+ x = self.encoder(x * x_mask, x_mask)
143
+ stats = self.proj(x) * x_mask
144
+
145
+ m, logs = torch.split(stats, self.out_channels, dim=1)
146
+ return x, m, logs, x_mask
147
+
148
+
149
+ class ResidualCouplingBlock(nn.Module):
150
+ def __init__(self,
151
+ channels,
152
+ hidden_channels,
153
+ kernel_size,
154
+ dilation_rate,
155
+ n_layers,
156
+ n_flows=4,
157
+ gin_channels=0):
158
+ super().__init__()
159
+ self.channels = channels
160
+ self.hidden_channels = hidden_channels
161
+ self.kernel_size = kernel_size
162
+ self.dilation_rate = dilation_rate
163
+ self.n_layers = n_layers
164
+ self.n_flows = n_flows
165
+ self.gin_channels = gin_channels
166
+
167
+ self.flows = nn.ModuleList()
168
+ for i in range(n_flows):
169
+ self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
170
+ self.flows.append(modules.Flip())
171
+
172
+ self.reverse = None
173
+ def forward(self, x, x_mask, g=None):
174
+ reverse = self.reverse
175
+ if not reverse:
176
+ for flow in self.flows:
177
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
178
+ else:
179
+ for flow in reversed(self.flows):
180
+ x = flow(x, x_mask, g=g, reverse=reverse)
181
+ return x
182
+
183
+
184
+ class PosteriorEncoder(nn.Module):
185
+ def __init__(self,
186
+ in_channels,
187
+ out_channels,
188
+ hidden_channels,
189
+ kernel_size,
190
+ dilation_rate,
191
+ n_layers,
192
+ gin_channels=0):
193
+ super().__init__()
194
+ self.in_channels = in_channels
195
+ self.out_channels = out_channels
196
+ self.hidden_channels = hidden_channels
197
+ self.kernel_size = kernel_size
198
+ self.dilation_rate = dilation_rate
199
+ self.n_layers = n_layers
200
+ self.gin_channels = gin_channels
201
+
202
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
203
+ self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
204
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
205
+
206
+ def forward(self, x, x_lengths, g=None):
207
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
208
+ x = self.pre(x) * x_mask # x_in : [b, c, t] -> [b, h, t]
209
+ x = self.enc(x, x_mask, g=g) # x_in : [b, h, t], g : [b, h, 1], x = x_in + g
210
+ stats = self.proj(x) * x_mask
211
+ m, logs = torch.split(stats, self.out_channels, dim=1)
212
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
213
+ return z, m, logs, x_mask # z, m, logs : [b, h, t]
214
+
215
+
216
+ class Generator(torch.nn.Module):
217
+ def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
218
+ super(Generator, self).__init__()
219
+ self.num_kernels = len(resblock_kernel_sizes)
220
+ self.num_upsamples = len(upsample_rates)
221
+ self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
222
+ resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
223
+
224
+ self.ups = nn.ModuleList()
225
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
226
+ self.ups.append(weight_norm(
227
+ ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
228
+ k, u, padding=(k-u)//2)))
229
+
230
+ self.resblocks = nn.ModuleList()
231
+ for i in range(len(self.ups)):
232
+ ch = upsample_initial_channel//(2**(i+1))
233
+ for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
234
+ self.resblocks.append(resblock(ch, k, d))
235
+
236
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
237
+ self.ups.apply(init_weights)
238
+
239
+ if gin_channels != 0:
240
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
241
+
242
+ def forward(self, x, g=None):
243
+ x = self.conv_pre(x)
244
+ if g is not None:
245
+ x = x + self.cond(g)
246
+
247
+ for i in range(self.num_upsamples):
248
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
249
+ x = self.ups[i](x)
250
+ xs = None
251
+ for j in range(self.num_kernels):
252
+ if xs is None:
253
+ xs = self.resblocks[i*self.num_kernels+j](x)
254
+ else:
255
+ xs += self.resblocks[i*self.num_kernels+j](x)
256
+ x = xs / self.num_kernels
257
+ x = F.leaky_relu(x)
258
+ x = self.conv_post(x)
259
+ x = torch.tanh(x)
260
+
261
+ return x
262
+
263
+ def remove_weight_norm(self):
264
+ print('Removing weight norm...')
265
+ for l in self.ups:
266
+ remove_weight_norm(l)
267
+ for l in self.resblocks:
268
+ l.remove_weight_norm()
269
+
270
+
271
+ class DiscriminatorP(torch.nn.Module):
272
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
273
+ super(DiscriminatorP, self).__init__()
274
+ self.period = period
275
+ self.use_spectral_norm = use_spectral_norm
276
+ norm_f = weight_norm if use_spectral_norm == False else spectral_norm
277
+ self.convs = nn.ModuleList([
278
+ norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
279
+ norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
280
+ norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
281
+ norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
282
+ norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
283
+ ])
284
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
285
+
286
+ def forward(self, x):
287
+ fmap = []
288
+
289
+ # 1d to 2d
290
+ b, c, t = x.shape
291
+ if t % self.period != 0: # pad first
292
+ n_pad = self.period - (t % self.period)
293
+ x = F.pad(x, (0, n_pad), "reflect")
294
+ t = t + n_pad
295
+ x = x.view(b, c, t // self.period, self.period)
296
+
297
+ for l in self.convs:
298
+ x = l(x)
299
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
300
+ fmap.append(x)
301
+ x = self.conv_post(x)
302
+ fmap.append(x)
303
+ x = torch.flatten(x, 1, -1)
304
+
305
+ return x, fmap
306
+
307
+
308
+ class DiscriminatorS(torch.nn.Module):
309
+ def __init__(self, use_spectral_norm=False):
310
+ super(DiscriminatorS, self).__init__()
311
+ norm_f = weight_norm if use_spectral_norm == False else spectral_norm
312
+ self.convs = nn.ModuleList([
313
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
314
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
315
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
316
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
317
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
318
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
319
+ ])
320
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
321
+
322
+ def forward(self, x):
323
+ fmap = []
324
+
325
+ for l in self.convs:
326
+ x = l(x)
327
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
328
+ fmap.append(x)
329
+ x = self.conv_post(x)
330
+ fmap.append(x)
331
+ x = torch.flatten(x, 1, -1)
332
+
333
+ return x, fmap
334
+
335
+
336
+ class MultiPeriodDiscriminator(torch.nn.Module):
337
+ def __init__(self, use_spectral_norm=False):
338
+ super(MultiPeriodDiscriminator, self).__init__()
339
+ periods = [2,3,5,7,11]
340
+
341
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
342
+ discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
343
+ self.discriminators = nn.ModuleList(discs)
344
+
345
+ def forward(self, y, y_hat):
346
+ y_d_rs = []
347
+ y_d_gs = []
348
+ fmap_rs = []
349
+ fmap_gs = []
350
+ for i, d in enumerate(self.discriminators):
351
+ y_d_r, fmap_r = d(y)
352
+ y_d_g, fmap_g = d(y_hat)
353
+ y_d_rs.append(y_d_r)
354
+ y_d_gs.append(y_d_g)
355
+ fmap_rs.append(fmap_r)
356
+ fmap_gs.append(fmap_g)
357
+
358
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
359
+
360
+
361
+
362
+ class SynthesizerTrn(nn.Module):
363
+ """
364
+ Synthesizer for Training
365
+ """
366
+
367
+ def __init__(self,
368
+ n_vocab,
369
+ spec_channels,
370
+ segment_size,
371
+ inter_channels,
372
+ hidden_channels,
373
+ filter_channels,
374
+ n_heads,
375
+ n_layers,
376
+ kernel_size,
377
+ p_dropout,
378
+ resblock,
379
+ resblock_kernel_sizes,
380
+ resblock_dilation_sizes,
381
+ upsample_rates,
382
+ upsample_initial_channel,
383
+ upsample_kernel_sizes,
384
+ n_speakers=0,
385
+ gin_channels=0,
386
+ use_sdp=True,
387
+ **kwargs):
388
+
389
+ super().__init__()
390
+ self.n_vocab = n_vocab
391
+ self.spec_channels = spec_channels
392
+ self.inter_channels = inter_channels
393
+ self.hidden_channels = hidden_channels
394
+ self.filter_channels = filter_channels
395
+ self.n_heads = n_heads
396
+ self.n_layers = n_layers
397
+ self.kernel_size = kernel_size
398
+ self.p_dropout = p_dropout
399
+ self.resblock = resblock
400
+ self.resblock_kernel_sizes = resblock_kernel_sizes
401
+ self.resblock_dilation_sizes = resblock_dilation_sizes
402
+ self.upsample_rates = upsample_rates
403
+ self.upsample_initial_channel = upsample_initial_channel
404
+ self.upsample_kernel_sizes = upsample_kernel_sizes
405
+ self.segment_size = segment_size
406
+ self.n_speakers = n_speakers
407
+ self.gin_channels = gin_channels
408
+
409
+ self.use_sdp = use_sdp
410
+
411
+ self.enc_p = TextEncoder(n_vocab,
412
+ inter_channels,
413
+ hidden_channels,
414
+ filter_channels,
415
+ n_heads,
416
+ n_layers,
417
+ kernel_size,
418
+ p_dropout)
419
+ self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
420
+ self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
421
+ self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
422
+
423
+ self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
424
+
425
+ if n_speakers > 0:
426
+ self.emb_g = nn.Embedding(n_speakers, gin_channels)
427
+
428
+ def forward(self, x, x_lengths, sid=None, noise_scale=.667, length_scale=1, noise_scale_w=.8, max_len=None):
429
+ torch.onnx.export(
430
+ self.enc_p,
431
+ (x, x_lengths),
432
+ "ONNX_net/enc_p.onnx",
433
+ input_names=["x", "x_lengths"],
434
+ output_names=["xout", "m_p", "logs_p", "x_mask"],
435
+ dynamic_axes={
436
+ "x" : [1],
437
+ "xout" : [2],
438
+ "m_p" : [2],
439
+ "logs_p" : [2],
440
+ "x_mask" : [2]
441
+ },
442
+ verbose=True,
443
+ )
444
+ x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
445
+
446
+ if self.n_speakers > 0:
447
+ g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
448
+ else:
449
+ g = None
450
+
451
+ self.dp.reverse = True
452
+ self.dp.noise_scale = noise_scale_w
453
+ torch.onnx.export(
454
+ self.dp,
455
+ (x, x_mask, g),
456
+ "ONNX_net/dp.onnx",
457
+ input_names=["x", "x_mask", "g"],
458
+ output_names=["logw"],
459
+ dynamic_axes={
460
+ "x" : [2],
461
+ "x_mask" : [2],
462
+ "logw" : [2]
463
+ },
464
+ verbose=True,
465
+ )
466
+ logw = self.dp(x, x_mask, g=g)
467
+ w = torch.exp(logw) * x_mask * length_scale
468
+ w_ceil = torch.ceil(w)
469
+ y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
470
+ y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
471
+ attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
472
+ attn = commons.generate_path(w_ceil, attn_mask)
473
+
474
+ m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
475
+ logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
476
+
477
+ z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
478
+
479
+ self.flow.reverse = True
480
+ torch.onnx.export(
481
+ self.flow,
482
+ (z_p, y_mask, g),
483
+ "ONNX_net/flow.onnx",
484
+ input_names=["z_p", "y_mask", "g"],
485
+ output_names=["z"],
486
+ dynamic_axes={
487
+ "z_p" : [2],
488
+ "y_mask" : [2],
489
+ "z" : [2]
490
+ },
491
+ verbose=True,
492
+ )
493
+ z = self.flow(z_p, y_mask, g=g)
494
+ z_in = (z * y_mask)[:,:,:max_len]
495
+
496
+ torch.onnx.export(
497
+ self.dec,
498
+ (z_in, g),
499
+ "ONNX_net/dec.onnx",
500
+ input_names=["z_in", "g"],
501
+ output_names=["o"],
502
+ dynamic_axes={
503
+ "z_in" : [2],
504
+ "o" : [2]
505
+ },
506
+ verbose=True,
507
+ )
508
+ o = self.dec(z_in, g=g)
509
+ return o
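Note: SynthesizerTrn.forward() above exports the four sub-networks into the relative directory "ONNX_net/", and torch.onnx.export does not create missing directories. A small, hypothetical guard before running the export:

    import os
    os.makedirs("ONNX_net", exist_ok=True)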
ONNXVITS_modules.py ADDED
@@ -0,0 +1,390 @@
1
+ import copy
2
+ import math
3
+ import numpy as np
4
+ import scipy
5
+ import torch
6
+ from torch import nn
7
+ from torch.nn import functional as F
8
+
9
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
10
+ from torch.nn.utils import weight_norm, remove_weight_norm
11
+
12
+ import commons
13
+ from commons import init_weights, get_padding
14
+ from ONNXVITS_transforms import piecewise_rational_quadratic_transform
15
+
16
+
17
+ LRELU_SLOPE = 0.1
18
+
19
+
20
+ class LayerNorm(nn.Module):
21
+ def __init__(self, channels, eps=1e-5):
22
+ super().__init__()
23
+ self.channels = channels
24
+ self.eps = eps
25
+
26
+ self.gamma = nn.Parameter(torch.ones(channels))
27
+ self.beta = nn.Parameter(torch.zeros(channels))
28
+
29
+ def forward(self, x):
30
+ x = x.transpose(1, -1)
31
+ x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
32
+ return x.transpose(1, -1)
33
+
34
+
35
+ class ConvReluNorm(nn.Module):
36
+ def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
37
+ super().__init__()
38
+ self.in_channels = in_channels
39
+ self.hidden_channels = hidden_channels
40
+ self.out_channels = out_channels
41
+ self.kernel_size = kernel_size
42
+ self.n_layers = n_layers
43
+ self.p_dropout = p_dropout
44
+ assert n_layers > 1, "Number of layers should be larger than 1."
45
+
46
+ self.conv_layers = nn.ModuleList()
47
+ self.norm_layers = nn.ModuleList()
48
+ self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
49
+ self.norm_layers.append(LayerNorm(hidden_channels))
50
+ self.relu_drop = nn.Sequential(
51
+ nn.ReLU(),
52
+ nn.Dropout(p_dropout))
53
+ for _ in range(n_layers-1):
54
+ self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
55
+ self.norm_layers.append(LayerNorm(hidden_channels))
56
+ self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
57
+ self.proj.weight.data.zero_()
58
+ self.proj.bias.data.zero_()
59
+
60
+ def forward(self, x, x_mask):
61
+ x_org = x
62
+ for i in range(self.n_layers):
63
+ x = self.conv_layers[i](x * x_mask)
64
+ x = self.norm_layers[i](x)
65
+ x = self.relu_drop(x)
66
+ x = x_org + self.proj(x)
67
+ return x * x_mask
68
+
69
+
70
+ class DDSConv(nn.Module):
71
+ """
72
+ Dilated and Depth-Separable Convolution
73
+ """
74
+ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
75
+ super().__init__()
76
+ self.channels = channels
77
+ self.kernel_size = kernel_size
78
+ self.n_layers = n_layers
79
+ self.p_dropout = p_dropout
80
+
81
+ self.drop = nn.Dropout(p_dropout)
82
+ self.convs_sep = nn.ModuleList()
83
+ self.convs_1x1 = nn.ModuleList()
84
+ self.norms_1 = nn.ModuleList()
85
+ self.norms_2 = nn.ModuleList()
86
+ for i in range(n_layers):
87
+ dilation = kernel_size ** i
88
+ padding = (kernel_size * dilation - dilation) // 2
89
+ self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
90
+ groups=channels, dilation=dilation, padding=padding
91
+ ))
92
+ self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
93
+ self.norms_1.append(LayerNorm(channels))
94
+ self.norms_2.append(LayerNorm(channels))
95
+
96
+ def forward(self, x, x_mask, g=None):
97
+ if g is not None:
98
+ x = x + g
99
+ for i in range(self.n_layers):
100
+ y = self.convs_sep[i](x * x_mask)
101
+ y = self.norms_1[i](y)
102
+ y = F.gelu(y)
103
+ y = self.convs_1x1[i](y)
104
+ y = self.norms_2[i](y)
105
+ y = F.gelu(y)
106
+ y = self.drop(y)
107
+ x = x + y
108
+ return x * x_mask
109
+
110
+
111
+ class WN(torch.nn.Module):
112
+ def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
113
+ super(WN, self).__init__()
114
+ assert(kernel_size % 2 == 1)
115
+ self.hidden_channels = hidden_channels
116
+ self.kernel_size = kernel_size
117
+ self.dilation_rate = dilation_rate
118
+ self.n_layers = n_layers
119
+ self.gin_channels = gin_channels
120
+ self.p_dropout = p_dropout
121
+
122
+ self.in_layers = torch.nn.ModuleList()
123
+ self.res_skip_layers = torch.nn.ModuleList()
124
+ self.drop = nn.Dropout(p_dropout)
125
+
126
+ if gin_channels != 0:
127
+ cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
128
+ self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
129
+
130
+ for i in range(n_layers):
131
+ dilation = dilation_rate ** i
132
+ padding = int((kernel_size * dilation - dilation) / 2)
133
+ in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
134
+ dilation=dilation, padding=padding)
135
+ in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
136
+ self.in_layers.append(in_layer)
137
+
138
+ # last one is not necessary
139
+ if i < n_layers - 1:
140
+ res_skip_channels = 2 * hidden_channels
141
+ else:
142
+ res_skip_channels = hidden_channels
143
+
144
+ res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
145
+ res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
146
+ self.res_skip_layers.append(res_skip_layer)
147
+
148
+ def forward(self, x, x_mask, g=None, **kwargs):
149
+ output = torch.zeros_like(x)
150
+ n_channels_tensor = torch.IntTensor([self.hidden_channels])
151
+
152
+ if g is not None:
153
+ g = self.cond_layer(g)
154
+
155
+ for i in range(self.n_layers):
156
+ x_in = self.in_layers[i](x)
157
+ if g is not None:
158
+ cond_offset = i * 2 * self.hidden_channels
159
+ g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
160
+ else:
161
+ g_l = torch.zeros_like(x_in)
162
+
163
+ acts = commons.fused_add_tanh_sigmoid_multiply(
164
+ x_in,
165
+ g_l,
166
+ n_channels_tensor)
167
+ acts = self.drop(acts)
168
+
169
+ res_skip_acts = self.res_skip_layers[i](acts)
170
+ if i < self.n_layers - 1:
171
+ res_acts = res_skip_acts[:,:self.hidden_channels,:]
172
+ x = (x + res_acts) * x_mask
173
+ output = output + res_skip_acts[:,self.hidden_channels:,:]
174
+ else:
175
+ output = output + res_skip_acts
176
+ return output * x_mask
177
+
178
+ def remove_weight_norm(self):
179
+ if self.gin_channels != 0:
180
+ torch.nn.utils.remove_weight_norm(self.cond_layer)
181
+ for l in self.in_layers:
182
+ torch.nn.utils.remove_weight_norm(l)
183
+ for l in self.res_skip_layers:
184
+ torch.nn.utils.remove_weight_norm(l)
185
+
186
+
187
+ class ResBlock1(torch.nn.Module):
188
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
189
+ super(ResBlock1, self).__init__()
190
+ self.convs1 = nn.ModuleList([
191
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
192
+ padding=get_padding(kernel_size, dilation[0]))),
193
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
194
+ padding=get_padding(kernel_size, dilation[1]))),
195
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
196
+ padding=get_padding(kernel_size, dilation[2])))
197
+ ])
198
+ self.convs1.apply(init_weights)
199
+
200
+ self.convs2 = nn.ModuleList([
201
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
202
+ padding=get_padding(kernel_size, 1))),
203
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
204
+ padding=get_padding(kernel_size, 1))),
205
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
206
+ padding=get_padding(kernel_size, 1)))
207
+ ])
208
+ self.convs2.apply(init_weights)
209
+
210
+ def forward(self, x, x_mask=None):
211
+ for c1, c2 in zip(self.convs1, self.convs2):
212
+ xt = F.leaky_relu(x, LRELU_SLOPE)
213
+ if x_mask is not None:
214
+ xt = xt * x_mask
215
+ xt = c1(xt)
216
+ xt = F.leaky_relu(xt, LRELU_SLOPE)
217
+ if x_mask is not None:
218
+ xt = xt * x_mask
219
+ xt = c2(xt)
220
+ x = xt + x
221
+ if x_mask is not None:
222
+ x = x * x_mask
223
+ return x
224
+
225
+ def remove_weight_norm(self):
226
+ for l in self.convs1:
227
+ remove_weight_norm(l)
228
+ for l in self.convs2:
229
+ remove_weight_norm(l)
230
+
231
+
232
+ class ResBlock2(torch.nn.Module):
233
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
234
+ super(ResBlock2, self).__init__()
235
+ self.convs = nn.ModuleList([
236
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
237
+ padding=get_padding(kernel_size, dilation[0]))),
238
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
239
+ padding=get_padding(kernel_size, dilation[1])))
240
+ ])
241
+ self.convs.apply(init_weights)
242
+
243
+ def forward(self, x, x_mask=None):
244
+ for c in self.convs:
245
+ xt = F.leaky_relu(x, LRELU_SLOPE)
246
+ if x_mask is not None:
247
+ xt = xt * x_mask
248
+ xt = c(xt)
249
+ x = xt + x
250
+ if x_mask is not None:
251
+ x = x * x_mask
252
+ return x
253
+
254
+ def remove_weight_norm(self):
255
+ for l in self.convs:
256
+ remove_weight_norm(l)
257
+
258
+
259
+ class Log(nn.Module):
260
+ def forward(self, x, x_mask, reverse=False, **kwargs):
261
+ if not reverse:
262
+ y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
263
+ logdet = torch.sum(-y, [1, 2])
264
+ return y, logdet
265
+ else:
266
+ x = torch.exp(x) * x_mask
267
+ return x
268
+
269
+
270
+ class Flip(nn.Module):
271
+ def forward(self, x, *args, reverse=False, **kwargs):
272
+ x = torch.flip(x, [1])
273
+ if not reverse:
274
+ logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
275
+ return x, logdet
276
+ else:
277
+ return x
278
+
279
+
280
+ class ElementwiseAffine(nn.Module):
281
+ def __init__(self, channels):
282
+ super().__init__()
283
+ self.channels = channels
284
+ self.m = nn.Parameter(torch.zeros(channels,1))
285
+ self.logs = nn.Parameter(torch.zeros(channels,1))
286
+
287
+ def forward(self, x, x_mask, reverse=False, **kwargs):
288
+ if not reverse:
289
+ y = self.m + torch.exp(self.logs) * x
290
+ y = y * x_mask
291
+ logdet = torch.sum(self.logs * x_mask, [1,2])
292
+ return y, logdet
293
+ else:
294
+ x = (x - self.m) * torch.exp(-self.logs) * x_mask
295
+ return x
296
+
297
+
298
+ class ResidualCouplingLayer(nn.Module):
299
+ def __init__(self,
300
+ channels,
301
+ hidden_channels,
302
+ kernel_size,
303
+ dilation_rate,
304
+ n_layers,
305
+ p_dropout=0,
306
+ gin_channels=0,
307
+ mean_only=False):
308
+ assert channels % 2 == 0, "channels should be divisible by 2"
309
+ super().__init__()
310
+ self.channels = channels
311
+ self.hidden_channels = hidden_channels
312
+ self.kernel_size = kernel_size
313
+ self.dilation_rate = dilation_rate
314
+ self.n_layers = n_layers
315
+ self.half_channels = channels // 2
316
+ self.mean_only = mean_only
317
+
318
+ self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
319
+ self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
320
+ self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
321
+ self.post.weight.data.zero_()
322
+ self.post.bias.data.zero_()
323
+
324
+ def forward(self, x, x_mask, g=None, reverse=False):
325
+ x0, x1 = torch.split(x, [self.half_channels]*2, 1)
326
+ h = self.pre(x0) * x_mask
327
+ h = self.enc(h, x_mask, g=g)
328
+ stats = self.post(h) * x_mask
329
+ if not self.mean_only:
330
+ m, logs = torch.split(stats, [self.half_channels]*2, 1)
331
+ else:
332
+ m = stats
333
+ logs = torch.zeros_like(m)
334
+
335
+ if not reverse:
336
+ x1 = m + x1 * torch.exp(logs) * x_mask
337
+ x = torch.cat([x0, x1], 1)
338
+ logdet = torch.sum(logs, [1,2])
339
+ return x, logdet
340
+ else:
341
+ x1 = (x1 - m) * torch.exp(-logs) * x_mask
342
+ x = torch.cat([x0, x1], 1)
343
+ return x
344
+
345
+
346
+ class ConvFlow(nn.Module):
347
+ def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
348
+ super().__init__()
349
+ self.in_channels = in_channels
350
+ self.filter_channels = filter_channels
351
+ self.kernel_size = kernel_size
352
+ self.n_layers = n_layers
353
+ self.num_bins = num_bins
354
+ self.tail_bound = tail_bound
355
+ self.half_channels = in_channels // 2
356
+
357
+ self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
358
+ self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
359
+ self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
360
+ self.proj.weight.data.zero_()
361
+ self.proj.bias.data.zero_()
362
+
363
+ def forward(self, x, x_mask, g=None, reverse=False):
364
+ x0, x1 = torch.split(x, [self.half_channels]*2, 1)
365
+ h = self.pre(x0)
366
+ h = self.convs(h, x_mask, g=g)
367
+ h = self.proj(h) * x_mask
368
+
369
+ b, c, t = x0.shape
370
+ h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
371
+
372
+ unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
373
+ unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
374
+ unnormalized_derivatives = h[..., 2 * self.num_bins:]
375
+
376
+ x1, logabsdet = piecewise_rational_quadratic_transform(x1,
377
+ unnormalized_widths,
378
+ unnormalized_heights,
379
+ unnormalized_derivatives,
380
+ inverse=reverse,
381
+ tails='linear',
382
+ tail_bound=self.tail_bound
383
+ )
384
+
385
+ x = torch.cat([x0, x1], 1) * x_mask
386
+ logdet = torch.sum(logabsdet * x_mask, [1,2])
387
+ if not reverse:
388
+ return x, logdet
389
+ else:
390
+ return x
ONNXVITS_to_onnx.py ADDED
@@ -0,0 +1,31 @@
+ import ONNXVITS_models
+ import utils
+ from text import text_to_sequence
+ import torch
+ import commons
+
+ def get_text(text, hps):
+     text_norm = text_to_sequence(text, hps.symbols, hps.data.text_cleaners)
+     if hps.data.add_blank:
+         text_norm = commons.intersperse(text_norm, 0)
+     text_norm = torch.LongTensor(text_norm)
+     return text_norm
+
+ hps = utils.get_hparams_from_file("../vits/pretrained_models/uma87.json")
+ symbols = hps.symbols
+ net_g = ONNXVITS_models.SynthesizerTrn(
+     len(symbols),
+     hps.data.filter_length // 2 + 1,
+     hps.train.segment_size // hps.data.hop_length,
+     n_speakers=hps.data.n_speakers,
+     **hps.model)
+ _ = net_g.eval()
+ _ = utils.load_checkpoint("../vits/pretrained_models/uma_1153000.pth", net_g)
+
+ text1 = get_text("ありがとうございます。", hps)
+ stn_tst = text1
+ with torch.no_grad():
+     x_tst = stn_tst.unsqueeze(0)
+     x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
+     sid = torch.tensor([0])
+     o = net_g(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, length_scale=1)
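Note: as an optional sanity check (not part of this commit), the graphs written by the export run above can be validated with the ONNX checker before they are consumed by ONNXVITS_infer.py:

    import onnx

    for name in ["enc_p", "dp", "flow", "dec"]:
        model = onnx.load(f"ONNX_net/{name}.onnx")
        onnx.checker.check_model(model)  # raises if the exported graph is malformed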
ONNXVITS_transforms.py ADDED
@@ -0,0 +1,196 @@
1
+ import torch
2
+ from torch.nn import functional as F
3
+
4
+ import numpy as np
5
+
6
+
7
+ DEFAULT_MIN_BIN_WIDTH = 1e-3
8
+ DEFAULT_MIN_BIN_HEIGHT = 1e-3
9
+ DEFAULT_MIN_DERIVATIVE = 1e-3
10
+
11
+
12
+ def piecewise_rational_quadratic_transform(inputs,
13
+ unnormalized_widths,
14
+ unnormalized_heights,
15
+ unnormalized_derivatives,
16
+ inverse=False,
17
+ tails=None,
18
+ tail_bound=1.,
19
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
20
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
21
+ min_derivative=DEFAULT_MIN_DERIVATIVE):
22
+
23
+ if tails is None:
24
+ spline_fn = rational_quadratic_spline
25
+ spline_kwargs = {}
26
+ else:
27
+ spline_fn = unconstrained_rational_quadratic_spline
28
+ spline_kwargs = {
29
+ 'tails': tails,
30
+ 'tail_bound': tail_bound
31
+ }
32
+
33
+ outputs, logabsdet = spline_fn(
34
+ inputs=inputs,
35
+ unnormalized_widths=unnormalized_widths,
36
+ unnormalized_heights=unnormalized_heights,
37
+ unnormalized_derivatives=unnormalized_derivatives,
38
+ inverse=inverse,
39
+ min_bin_width=min_bin_width,
40
+ min_bin_height=min_bin_height,
41
+ min_derivative=min_derivative,
42
+ **spline_kwargs
43
+ )
44
+ return outputs, logabsdet
45
+
46
+
47
+ def searchsorted(bin_locations, inputs, eps=1e-6):
48
+ bin_locations[..., -1] += eps
49
+ return torch.sum(
50
+ inputs[..., None] >= bin_locations,
51
+ dim=-1
52
+ ) - 1
53
+
54
+
55
+ def unconstrained_rational_quadratic_spline(inputs,
56
+ unnormalized_widths,
57
+ unnormalized_heights,
58
+ unnormalized_derivatives,
59
+ inverse=False,
60
+ tails='linear',
61
+ tail_bound=1.,
62
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
63
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
64
+ min_derivative=DEFAULT_MIN_DERIVATIVE):
65
+ inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
66
+ outside_interval_mask = ~inside_interval_mask
67
+
68
+ outputs = torch.zeros_like(inputs)
69
+ logabsdet = torch.zeros_like(inputs)
70
+
71
+ if tails == 'linear':
72
+ #unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
73
+ unnormalized_derivatives_ = torch.zeros((1, 1, unnormalized_derivatives.size(2), unnormalized_derivatives.size(3)+2))
74
+ unnormalized_derivatives_[...,1:-1] = unnormalized_derivatives
75
+ unnormalized_derivatives = unnormalized_derivatives_
76
+ constant = np.log(np.exp(1 - min_derivative) - 1)
77
+ unnormalized_derivatives[..., 0] = constant
78
+ unnormalized_derivatives[..., -1] = constant
79
+
80
+ outputs[outside_interval_mask] = inputs[outside_interval_mask]
81
+ logabsdet[outside_interval_mask] = 0
82
+ else:
83
+ raise RuntimeError('{} tails are not implemented.'.format(tails))
84
+
85
+ outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
86
+ inputs=inputs[inside_interval_mask],
87
+ unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
88
+ unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
89
+ unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
90
+ inverse=inverse,
91
+ left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
92
+ min_bin_width=min_bin_width,
93
+ min_bin_height=min_bin_height,
94
+ min_derivative=min_derivative
95
+ )
96
+
97
+ return outputs, logabsdet
98
+
99
+ def rational_quadratic_spline(inputs,
100
+ unnormalized_widths,
101
+ unnormalized_heights,
102
+ unnormalized_derivatives,
103
+ inverse=False,
104
+ left=0., right=1., bottom=0., top=1.,
105
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
106
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
107
+ min_derivative=DEFAULT_MIN_DERIVATIVE):
108
+ if torch.min(inputs) < left or torch.max(inputs) > right:
109
+ raise ValueError('Input to a transform is not within its domain')
110
+
111
+ num_bins = unnormalized_widths.shape[-1]
112
+
113
+ if min_bin_width * num_bins > 1.0:
114
+ raise ValueError('Minimal bin width too large for the number of bins')
115
+ if min_bin_height * num_bins > 1.0:
116
+ raise ValueError('Minimal bin height too large for the number of bins')
117
+
118
+ widths = F.softmax(unnormalized_widths, dim=-1)
119
+ widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
120
+ cumwidths = torch.cumsum(widths, dim=-1)
121
+ cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
122
+ cumwidths = (right - left) * cumwidths + left
123
+ cumwidths[..., 0] = left
124
+ cumwidths[..., -1] = right
125
+ widths = cumwidths[..., 1:] - cumwidths[..., :-1]
126
+
127
+ derivatives = min_derivative + F.softplus(unnormalized_derivatives)
128
+
129
+ heights = F.softmax(unnormalized_heights, dim=-1)
130
+ heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
131
+ cumheights = torch.cumsum(heights, dim=-1)
132
+ cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
133
+ cumheights = (top - bottom) * cumheights + bottom
134
+ cumheights[..., 0] = bottom
135
+ cumheights[..., -1] = top
136
+ heights = cumheights[..., 1:] - cumheights[..., :-1]
137
+
138
+ if inverse:
139
+ bin_idx = searchsorted(cumheights, inputs)[..., None]
140
+ else:
141
+ bin_idx = searchsorted(cumwidths, inputs)[..., None]
142
+
143
+ input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
144
+ input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
145
+
146
+ input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
147
+ delta = heights / widths
148
+ input_delta = delta.gather(-1, bin_idx)[..., 0]
149
+
150
+ input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
151
+ input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
152
+
153
+ input_heights = heights.gather(-1, bin_idx)[..., 0]
154
+
155
+ if inverse:
156
+ a = (((inputs - input_cumheights) * (input_derivatives
157
+ + input_derivatives_plus_one
158
+ - 2 * input_delta)
159
+ + input_heights * (input_delta - input_derivatives)))
160
+ b = (input_heights * input_derivatives
161
+ - (inputs - input_cumheights) * (input_derivatives
162
+ + input_derivatives_plus_one
163
+ - 2 * input_delta))
164
+ c = - input_delta * (inputs - input_cumheights)
165
+
166
+ discriminant = b.pow(2) - 4 * a * c
167
+ assert (discriminant >= 0).all()
168
+
169
+ root = (2 * c) / (-b - torch.sqrt(discriminant))
170
+ outputs = root * input_bin_widths + input_cumwidths
171
+
172
+ theta_one_minus_theta = root * (1 - root)
173
+ denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
174
+ * theta_one_minus_theta)
175
+ derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
176
+ + 2 * input_delta * theta_one_minus_theta
177
+ + input_derivatives * (1 - root).pow(2))
178
+ logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
179
+
180
+ return outputs, -logabsdet
181
+ else:
182
+ theta = (inputs - input_cumwidths) / input_bin_widths
183
+ theta_one_minus_theta = theta * (1 - theta)
184
+
185
+ numerator = input_heights * (input_delta * theta.pow(2)
186
+ + input_derivatives * theta_one_minus_theta)
187
+ denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
188
+ * theta_one_minus_theta)
189
+ outputs = input_cumheights + numerator / denominator
190
+
191
+ derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
192
+ + 2 * input_delta * theta_one_minus_theta
193
+ + input_derivatives * (1 - theta).pow(2))
194
+ logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
195
+
196
+ return outputs, logabsdet
ONNXVITS_utils.py ADDED
@@ -0,0 +1,19 @@
+ import torch
+ import numpy as np
+ import random
+ import onnxruntime as ort
+
+ def set_random_seed(seed=0):
+     ort.set_seed(seed)
+     torch.manual_seed(seed)
+     torch.cuda.manual_seed(seed)
+     torch.backends.cudnn.deterministic = True
+     random.seed(seed)
+     np.random.seed(seed)
+
+ def runonnx(model_path, **kwargs):
+     ort_session = ort.InferenceSession(model_path)
+     outputs = ort_session.run(
+         None,
+         kwargs
+     )
+     return outputs
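Note: runonnx() above builds a fresh onnxruntime.InferenceSession on every call, so each synthesis reloads all four graphs from disk. A possible (hypothetical) variant that caches one session per model path:

    from functools import lru_cache

    @lru_cache(maxsize=None)
    def _get_session(model_path):
        # build each InferenceSession once and reuse it on later calls
        return ort.InferenceSession(model_path)

    def runonnx_cached(model_path, **kwargs):
        return _get_session(model_path).run(None, kwargs)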
ONNX_net/dec.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f5b6cd61faabd9606d85dccf5a2b9720a95fc0d9f4a93c80b5be43764816a81
+ size 58183684
ONNX_net/dp.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06fd386f4d2c75fb54d0092db4fa35b64bc22741c1a9e5431fb99b24fa067fcd
+ size 7387023
ONNX_net/enc_p.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:270154c4d7d8f1a16480990cf08085526d39818aabd94bf5204efe7e9c5615d1
+ size 28510879
ONNX_net/flow.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10ec205d80f5dfbfe5ed8ef3a8aa4ffbe126b7e8fcf05e1eb64d73793aeec011
+ size 35707325
attentions.py CHANGED
@@ -1,12 +1,9 @@
1
- import copy
2
  import math
3
- import numpy as np
4
  import torch
5
  from torch import nn
6
  from torch.nn import functional as F
7
 
8
  import commons
9
- import modules
10
  from modules import LayerNorm
11
 
12
 
commons.py CHANGED
@@ -1,8 +1,19 @@
1
  import math
2
- import numpy as np
3
  import torch
4
- from torch import nn
5
  from torch.nn import functional as F
+ import torch.jit
+
+
+ def script_method(fn, _rcb=None):
+     return fn
+
+
+ def script(obj, optimize=True, _frames_up=0, _rcb=None):
+     return obj
+
+
+ torch.jit.script_method = script_method
+ torch.jit.script = script
6
 
7
 
8
  def init_weights(m, mean=0.0, std=0.01):
@@ -15,36 +26,12 @@ def get_padding(kernel_size, dilation=1):
15
  return int((kernel_size*dilation - dilation)/2)
16
 
17
 
18
- def convert_pad_shape(pad_shape):
19
- l = pad_shape[::-1]
20
- pad_shape = [item for sublist in l for item in sublist]
21
- return pad_shape
22
-
23
-
24
  def intersperse(lst, item):
25
  result = [item] * (len(lst) * 2 + 1)
26
  result[1::2] = lst
27
  return result
28
 
29
 
30
- def kl_divergence(m_p, logs_p, m_q, logs_q):
31
- """KL(P||Q)"""
32
- kl = (logs_q - logs_p) - 0.5
33
- kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
34
- return kl
35
-
36
-
37
- def rand_gumbel(shape):
38
- """Sample from the Gumbel distribution, protect from overflows."""
39
- uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
40
- return -torch.log(-torch.log(uniform_samples))
41
-
42
-
43
- def rand_gumbel_like(x):
44
- g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
45
- return g
46
-
47
-
48
  def slice_segments(x, ids_str, segment_size=4):
49
  ret = torch.zeros_like(x[:, :, :segment_size])
50
  for i in range(x.size(0)):
@@ -64,34 +51,6 @@ def rand_slice_segments(x, x_lengths=None, segment_size=4):
64
  return ret, ids_str
65
 
66
 
67
- def get_timing_signal_1d(
68
- length, channels, min_timescale=1.0, max_timescale=1.0e4):
69
- position = torch.arange(length, dtype=torch.float)
70
- num_timescales = channels // 2
71
- log_timescale_increment = (
72
- math.log(float(max_timescale) / float(min_timescale)) /
73
- (num_timescales - 1))
74
- inv_timescales = min_timescale * torch.exp(
75
- torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
76
- scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
77
- signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
78
- signal = F.pad(signal, [0, 0, 0, channels % 2])
79
- signal = signal.view(1, channels, length)
80
- return signal
81
-
82
-
83
- def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
84
- b, channels, length = x.size()
85
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
86
- return x + signal.to(dtype=x.dtype, device=x.device)
87
-
88
-
89
- def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
90
- b, channels, length = x.size()
91
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
92
- return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
93
-
94
-
95
  def subsequent_mask(length):
96
  mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
97
  return mask
@@ -113,11 +72,6 @@ def convert_pad_shape(pad_shape):
113
  return pad_shape
114
 
115
 
116
- def shift_1d(x):
117
- x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
118
- return x
119
-
120
-
121
  def sequence_mask(length, max_length=None):
122
  if max_length is None:
123
  max_length = length.max()
@@ -141,21 +95,3 @@ def generate_path(duration, mask):
141
  path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
142
  path = path.unsqueeze(1).transpose(2,3) * mask
143
  return path
144
-
145
-
146
- def clip_grad_value_(parameters, clip_value, norm_type=2):
147
- if isinstance(parameters, torch.Tensor):
148
- parameters = [parameters]
149
- parameters = list(filter(lambda p: p.grad is not None, parameters))
150
- norm_type = float(norm_type)
151
- if clip_value is not None:
152
- clip_value = float(clip_value)
153
-
154
- total_norm = 0
155
- for p in parameters:
156
- param_norm = p.grad.data.norm(norm_type)
157
- total_norm += param_norm.item() ** norm_type
158
- if clip_value is not None:
159
- p.grad.data.clamp_(min=-clip_value, max=clip_value)
160
- total_norm = total_norm ** (1. / norm_type)
161
- return total_norm
hubert_model.py ADDED
@@ -0,0 +1,221 @@
1
+ import copy
2
+ from typing import Optional, Tuple
3
+ import random
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+ from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
9
+
10
+ class Hubert(nn.Module):
11
+ def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
12
+ super().__init__()
13
+ self._mask = mask
14
+ self.feature_extractor = FeatureExtractor()
15
+ self.feature_projection = FeatureProjection()
16
+ self.positional_embedding = PositionalConvEmbedding()
17
+ self.norm = nn.LayerNorm(768)
18
+ self.dropout = nn.Dropout(0.1)
19
+ self.encoder = TransformerEncoder(
20
+ nn.TransformerEncoderLayer(
21
+ 768, 12, 3072, activation="gelu", batch_first=True
22
+ ),
23
+ 12,
24
+ )
25
+ self.proj = nn.Linear(768, 256)
26
+
27
+ self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
28
+ self.label_embedding = nn.Embedding(num_label_embeddings, 256)
29
+
30
+ def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
31
+ mask = None
32
+ if self.training and self._mask:
33
+ mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
34
+ x[mask] = self.masked_spec_embed.to(x.dtype)
35
+ return x, mask
36
+
37
+ def encode(
38
+ self, x: torch.Tensor, layer: Optional[int] = None
39
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
40
+ x = self.feature_extractor(x)
41
+ x = self.feature_projection(x.transpose(1, 2))
42
+ x, mask = self.mask(x)
43
+ x = x + self.positional_embedding(x)
44
+ x = self.dropout(self.norm(x))
45
+ x = self.encoder(x, output_layer=layer)
46
+ return x, mask
47
+
48
+ def logits(self, x: torch.Tensor) -> torch.Tensor:
49
+ logits = torch.cosine_similarity(
50
+ x.unsqueeze(2),
51
+ self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
52
+ dim=-1,
53
+ )
54
+ return logits / 0.1
55
+
56
+ def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
57
+ x, mask = self.encode(x)
58
+ x = self.proj(x)
59
+ logits = self.logits(x)
60
+ return logits, mask
61
+
62
+
63
+ class HubertSoft(Hubert):
64
+ def __init__(self):
65
+ super().__init__()
66
+
67
+ @torch.inference_mode()
68
+ def units(self, wav: torch.Tensor) -> torch.Tensor:
69
+ wav = F.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
70
+ x, _ = self.encode(wav)
71
+ return self.proj(x)
72
+
73
+
74
+ class FeatureExtractor(nn.Module):
75
+ def __init__(self):
76
+ super().__init__()
77
+ self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
78
+ self.norm0 = nn.GroupNorm(512, 512)
79
+ self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
80
+ self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
81
+ self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
82
+ self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
83
+ self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
84
+ self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)
85
+
86
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
87
+ x = F.gelu(self.norm0(self.conv0(x)))
88
+ x = F.gelu(self.conv1(x))
89
+ x = F.gelu(self.conv2(x))
90
+ x = F.gelu(self.conv3(x))
91
+ x = F.gelu(self.conv4(x))
92
+ x = F.gelu(self.conv5(x))
93
+ x = F.gelu(self.conv6(x))
94
+ return x
95
+
96
+
97
+ class FeatureProjection(nn.Module):
98
+ def __init__(self):
99
+ super().__init__()
100
+ self.norm = nn.LayerNorm(512)
101
+ self.projection = nn.Linear(512, 768)
102
+ self.dropout = nn.Dropout(0.1)
103
+
104
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
105
+ x = self.norm(x)
106
+ x = self.projection(x)
107
+ x = self.dropout(x)
108
+ return x
109
+
110
+
111
+ class PositionalConvEmbedding(nn.Module):
112
+ def __init__(self):
113
+ super().__init__()
114
+ self.conv = nn.Conv1d(
115
+ 768,
116
+ 768,
117
+ kernel_size=128,
118
+ padding=128 // 2,
119
+ groups=16,
120
+ )
121
+ self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
122
+
123
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
124
+ x = self.conv(x.transpose(1, 2))
125
+ x = F.gelu(x[:, :, :-1])
126
+ return x.transpose(1, 2)
127
+
128
+
129
+ class TransformerEncoder(nn.Module):
130
+ def __init__(
131
+ self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
132
+ ) -> None:
133
+ super(TransformerEncoder, self).__init__()
134
+ self.layers = nn.ModuleList(
135
+ [copy.deepcopy(encoder_layer) for _ in range(num_layers)]
136
+ )
137
+ self.num_layers = num_layers
138
+
139
+ def forward(
140
+ self,
141
+ src: torch.Tensor,
142
+ mask: torch.Tensor = None,
143
+ src_key_padding_mask: torch.Tensor = None,
144
+ output_layer: Optional[int] = None,
145
+ ) -> torch.Tensor:
146
+ output = src
147
+ for layer in self.layers[:output_layer]:
148
+ output = layer(
149
+ output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
150
+ )
151
+ return output
152
+
153
+
154
+ def _compute_mask(
155
+ shape: Tuple[int, int],
156
+ mask_prob: float,
157
+ mask_length: int,
158
+ device: torch.device,
159
+ min_masks: int = 0,
160
+ ) -> torch.Tensor:
161
+ batch_size, sequence_length = shape
162
+
163
+ if mask_length < 1:
164
+ raise ValueError("`mask_length` has to be bigger than 0.")
165
+
166
+ if mask_length > sequence_length:
167
+ raise ValueError(
168
+ f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}"
169
+ )
170
+
171
+ # compute number of masked spans in batch
172
+ num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
173
+ num_masked_spans = max(num_masked_spans, min_masks)
174
+
175
+ # make sure num masked indices <= sequence_length
176
+ if num_masked_spans * mask_length > sequence_length:
177
+ num_masked_spans = sequence_length // mask_length
178
+
179
+ # SpecAugment mask to fill
180
+ mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)
181
+
182
+ # uniform distribution to sample from, make sure that offset samples are < sequence_length
183
+ uniform_dist = torch.ones(
184
+ (batch_size, sequence_length - (mask_length - 1)), device=device
185
+ )
186
+
187
+ # get random indices to mask
188
+ mask_indices = torch.multinomial(uniform_dist, num_masked_spans)
189
+
190
+ # expand masked indices to masked spans
191
+ mask_indices = (
192
+ mask_indices.unsqueeze(dim=-1)
193
+ .expand((batch_size, num_masked_spans, mask_length))
194
+ .reshape(batch_size, num_masked_spans * mask_length)
195
+ )
196
+ offsets = (
197
+ torch.arange(mask_length, device=device)[None, None, :]
198
+ .expand((batch_size, num_masked_spans, mask_length))
199
+ .reshape(batch_size, num_masked_spans * mask_length)
200
+ )
201
+ mask_idxs = mask_indices + offsets
202
+
203
+ # scatter indices to mask
204
+ mask = mask.scatter(1, mask_idxs, True)
205
+
206
+ return mask
207
+
208
+
209
+ def hubert_soft(
210
+ path: str
211
+ ) -> HubertSoft:
212
+ r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
213
+ Args:
214
+ path (str): path of a pretrained model
215
+ """
216
+ hubert = HubertSoft()
217
+ checkpoint = torch.load(path)
218
+ consume_prefix_in_state_dict_if_present(checkpoint, "module.")
219
+ hubert.load_state_dict(checkpoint)
220
+ hubert.eval()
221
+ return hubert
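Note: hubert_soft() builds HubertSoft, strips a possible "module." prefix from the checkpoint keys, loads the weights, and returns the model in eval mode; HubertSoft.units() pads the waveform so frame centers line up and returns 256-dimensional soft units. A minimal usage sketch, assuming a locally available checkpoint (the path below is a placeholder) and 16 kHz mono input:

import torch
from hubert_model import hubert_soft

hubert = hubert_soft("path/to/hubert-soft.pt")   # placeholder checkpoint path
wav = torch.zeros(1, 1, 16000)                   # [batch, 1, samples] of 16 kHz mono audio
units = hubert.units(wav)                        # -> [batch, frames, 256] soft units
print(units.shape)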
jieba/dict.txt ADDED
The diff for this file is too large to render. See raw diff
 
mel_processing.py CHANGED
@@ -1,16 +1,5 @@
1
- import math
2
- import os
3
- import random
4
  import torch
5
- from torch import nn
6
- import torch.nn.functional as F
7
  import torch.utils.data
8
- import numpy as np
9
- import librosa
10
- import librosa.util as librosa_util
11
- from librosa.util import normalize, pad_center, tiny
12
- from scipy.signal import get_window
13
- from scipy.io.wavfile import read
14
  from librosa.filters import mel as librosa_mel_fn
15
 
16
  MAX_WAV_VALUE = 32768.0
@@ -64,7 +53,7 @@ def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False)
64
  y = y.squeeze(1)
65
 
66
  spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
67
- center=center, pad_mode='reflect', normalized=False, onesided=True)
68
 
69
  spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
70
  return spec
 
 
 
 
1
  import torch
 
 
2
  import torch.utils.data
 
 
 
 
 
 
3
  from librosa.filters import mel as librosa_mel_fn
4
 
5
  MAX_WAV_VALUE = 32768.0
 
53
  y = y.squeeze(1)
54
 
55
  spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
56
+ center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
57
 
58
  spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
59
  return spec
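Note: the only functional change in mel_processing.py is the explicit return_complex=False, which newer PyTorch versions require in order to keep the stacked real/imaginary layout that the spec.pow(2).sum(-1) line expects. Should a future PyTorch release drop return_complex=False entirely, the same magnitude can be computed from complex output; a hedged, self-contained sketch (the default sizes below are assumptions, not values from this repo):

import torch

def spectrogram_magnitude(y, n_fft=1024, hop_size=256, win_size=1024):
    # same magnitude as spectrogram_torch above, but via return_complex=True
    window = torch.hann_window(win_size, dtype=y.dtype, device=y.device)
    spec_c = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size,
                        window=window, center=False, normalized=False,
                        onesided=True, return_complex=True)
    return torch.sqrt(spec_c.abs().pow(2) + 1e-6)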
models.py CHANGED
@@ -1,4 +1,3 @@
1
- import copy
2
  import math
3
  import torch
4
  from torch import nn
@@ -7,9 +6,9 @@ from torch.nn import functional as F
7
  import commons
8
  import modules
9
  import attentions
10
- # import monotonic_align
11
 
12
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
13
  from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
14
  from commons import init_weights, get_padding
15
 
@@ -141,7 +140,8 @@ class TextEncoder(nn.Module):
141
  n_heads,
142
  n_layers,
143
  kernel_size,
144
- p_dropout):
 
145
  super().__init__()
146
  self.n_vocab = n_vocab
147
  self.out_channels = out_channels
@@ -151,9 +151,13 @@ class TextEncoder(nn.Module):
151
  self.n_layers = n_layers
152
  self.kernel_size = kernel_size
153
  self.p_dropout = p_dropout
154
-
155
- self.emb = nn.Embedding(n_vocab, hidden_channels)
156
- nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
 
 
 
 
157
 
158
  self.encoder = attentions.Encoder(
159
  hidden_channels,
@@ -164,8 +168,11 @@ class TextEncoder(nn.Module):
164
  p_dropout)
165
  self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1)
166
 
167
- def forward(self, x, x_lengths):
168
- x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
 
 
 
169
  x = torch.transpose(x, 1, -1) # [b, h, t]
170
  x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
171
 
@@ -412,6 +419,7 @@ class SynthesizerTrn(nn.Module):
412
  n_speakers=0,
413
  gin_channels=0,
414
  use_sdp=True,
 
415
  **kwargs):
416
 
417
  super().__init__()
@@ -443,7 +451,8 @@ class SynthesizerTrn(nn.Module):
443
  n_heads,
444
  n_layers,
445
  kernel_size,
446
- p_dropout)
 
447
  self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
448
  self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
449
  self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
@@ -496,8 +505,8 @@ class SynthesizerTrn(nn.Module):
496
  o = self.dec(z_slice, g=g)
497
  return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
498
 
499
- def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
500
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
501
  if self.n_speakers > 0:
502
  g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
503
  else:
 
 
1
  import math
2
  import torch
3
  from torch import nn
 
6
  import commons
7
  import modules
8
  import attentions
9
+ import monotonic_align
10
 
11
+ from torch.nn import Conv1d, ConvTranspose1d, Conv2d
12
  from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
13
  from commons import init_weights, get_padding
14
 
 
140
  n_heads,
141
  n_layers,
142
  kernel_size,
143
+ p_dropout,
144
+ emotion_embedding):
145
  super().__init__()
146
  self.n_vocab = n_vocab
147
  self.out_channels = out_channels
 
151
  self.n_layers = n_layers
152
  self.kernel_size = kernel_size
153
  self.p_dropout = p_dropout
154
+ self.emotion_embedding = emotion_embedding
155
+
156
+ if self.n_vocab!=0:
157
+ self.emb = nn.Embedding(n_vocab, hidden_channels)
158
+ if emotion_embedding:
159
+ self.emotion_emb = nn.Linear(1024, hidden_channels)
160
+ nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
161
 
162
  self.encoder = attentions.Encoder(
163
  hidden_channels,
 
168
  p_dropout)
169
  self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1)
170
 
171
+ def forward(self, x, x_lengths, emotion_embedding=None):
172
+ if self.n_vocab!=0:
173
+ x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
174
+ if emotion_embedding is not None:
175
+ x = x + self.emotion_emb(emotion_embedding.unsqueeze(1))
176
  x = torch.transpose(x, 1, -1) # [b, h, t]
177
  x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
178
 
 
419
  n_speakers=0,
420
  gin_channels=0,
421
  use_sdp=True,
422
+ emotion_embedding=False,
423
  **kwargs):
424
 
425
  super().__init__()
 
451
  n_heads,
452
  n_layers,
453
  kernel_size,
454
+ p_dropout,
455
+ emotion_embedding)
456
  self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
457
  self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
458
  self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
 
505
  o = self.dec(z_slice, g=g)
506
  return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
507
 
508
+ def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None, emotion_embedding=None):
509
+ x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding)
510
  if self.n_speakers > 0:
511
  g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
512
  else:
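Note: the models.py changes thread an optional emotion embedding through TextEncoder (a new nn.Linear(1024, hidden_channels) applied when a vector is passed) and expose it on SynthesizerTrn.infer. A minimal sketch of the mechanism in isolation; the sizes below are hypothetical, not taken from a config in this commit:

import torch
import torch.nn as nn

hidden_channels = 192                        # hypothetical; in practice this comes from the model config
emotion_emb = nn.Linear(1024, hidden_channels)

x = torch.randn(1, 50, hidden_channels)      # [b, t, h] text-encoder hidden states
emo = torch.randn(1, 1024)                   # precomputed emotion embedding, [b, 1024]
x = x + emotion_emb(emo.unsqueeze(1))        # [b, 1, h] broadcast over the time axis, as in the diff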
modules.py CHANGED
@@ -1,12 +1,9 @@
1
- import copy
2
  import math
3
- import numpy as np
4
- import scipy
5
  import torch
6
  from torch import nn
7
  from torch.nn import functional as F
8
 
9
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
10
  from torch.nn.utils import weight_norm, remove_weight_norm
11
 
12
  import commons
 
 
1
  import math
 
 
2
  import torch
3
  from torch import nn
4
  from torch.nn import functional as F
5
 
6
+ from torch.nn import Conv1d
7
  from torch.nn.utils import weight_norm, remove_weight_norm
8
 
9
  import commons
text/__init__.py CHANGED
@@ -1,14 +1,8 @@
1
  """ from https://github.com/keithito/tacotron """
2
  from text import cleaners
3
- from text.symbols import symbols
4
 
5
 
6
- # Mappings from symbol to numeric ID and vice versa:
7
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
8
- _id_to_symbol = {i: s for i, s in enumerate(symbols)}
9
-
10
-
11
- def text_to_sequence(text, cleaner_names):
12
  '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
13
  Args:
14
  text: string to convert to a sequence
@@ -16,6 +10,8 @@ def text_to_sequence(text, cleaner_names):
16
  Returns:
17
  List of integers corresponding to the symbols in the text
18
  '''
 
 
19
  sequence = []
20
 
21
  clean_text = _clean_text(text, cleaner_names)
@@ -27,26 +23,6 @@ def text_to_sequence(text, cleaner_names):
27
  return sequence
28
 
29
 
30
- def cleaned_text_to_sequence(cleaned_text):
31
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
32
- Args:
33
- text: string to convert to a sequence
34
- Returns:
35
- List of integers corresponding to the symbols in the text
36
- '''
37
- sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()]
38
- return sequence
39
-
40
-
41
- def sequence_to_text(sequence):
42
- '''Converts a sequence of IDs back to a string'''
43
- result = ''
44
- for symbol_id in sequence:
45
- s = _id_to_symbol[symbol_id]
46
- result += s
47
- return result
48
-
49
-
50
  def _clean_text(text, cleaner_names):
51
  for name in cleaner_names:
52
  cleaner = getattr(cleaners, name)
 
1
  """ from https://github.com/keithito/tacotron """
2
  from text import cleaners
 
3
 
4
 
5
+ def text_to_sequence(text, symbols, cleaner_names):
 
 
 
 
 
6
  '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
7
  Args:
8
  text: string to convert to a sequence
 
10
  Returns:
11
  List of integers corresponding to the symbols in the text
12
  '''
13
+ _symbol_to_id = {s: i for i, s in enumerate(symbols)}
14
+
15
  sequence = []
16
 
17
  clean_text = _clean_text(text, cleaner_names)
 
23
  return sequence
24
 
25
 
26
  def _clean_text(text, cleaner_names):
27
  for name in cleaner_names:
28
  cleaner = getattr(cleaners, name)
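Note: text_to_sequence now receives the symbol set as an argument instead of importing a module-level text.symbols, so one codebase can serve models with different symbol tables. A hedged usage sketch; the symbol list and cleaner name below are placeholders, and the chosen cleaner's language dependencies must be installed:

from text import text_to_sequence

symbols = list("_,.!?abcdefghijklmnopqrstuvwxyz ")   # hypothetical per-model symbol set
seq = text_to_sequence("konnichiwa.", symbols, ["japanese_cleaners2"])
print(seq)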
text/__pycache__/__init__.cpython-37.pyc CHANGED
Binary files a/text/__pycache__/__init__.cpython-37.pyc and b/text/__pycache__/__init__.cpython-37.pyc differ
 
text/__pycache__/cleaners.cpython-37.pyc CHANGED
Binary files a/text/__pycache__/cleaners.cpython-37.pyc and b/text/__pycache__/cleaners.cpython-37.pyc differ
 
text/__pycache__/japanese.cpython-37.pyc CHANGED
Binary files a/text/__pycache__/japanese.cpython-37.pyc and b/text/__pycache__/japanese.cpython-37.pyc differ
 
text/cleaners.py CHANGED
@@ -1,12 +1,8 @@
1
  import re
2
- from text.japanese import japanese_to_romaji_with_accent, japanese_to_ipa, japanese_to_ipa2, japanese_to_ipa3
3
-
4
- # from text.shanghainese import shanghainese_to_ipa
5
- # from text.cantonese import cantonese_to_ipa
6
- # from text.ngu_dialect import ngu_dialect_to_ipa
7
 
8
 
9
  def japanese_cleaners(text):
 
10
  text = japanese_to_romaji_with_accent(text)
11
  text = re.sub(r'([A-Za-z])$', r'\1.', text)
12
  return text
@@ -16,28 +12,135 @@ def japanese_cleaners2(text):
16
  return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…')
17
 
18
 
19
 
 
 
 
 
 
20
 
21
 
22
- # def shanghainese_cleaners(text):
23
- # text = shanghainese_to_ipa(text)
24
- # text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
25
- # return text
 
26
 
27
 
28
- # def chinese_dialect_cleaners(text):
29
- # text = re.sub(r'\[ZH\](.*?)\[ZH\]',
30
- # lambda x: chinese_to_ipa2(x.group(1))+' ', text)
31
- # text = re.sub(r'\[JA\](.*?)\[JA\]',
32
- # lambda x: japanese_to_ipa3(x.group(1)).replace('Q', 'ʔ')+' ', text)
33
- # text = re.sub(r'\[SH\](.*?)\[SH\]', lambda x: shanghainese_to_ipa(x.group(1)).replace('1', '˥˧').replace('5',
34
- # '˧˧˦').replace('6', '˩˩˧').replace('7', '˥').replace('8', '˩˨').replace('ᴀ', 'ɐ').replace('ᴇ', 'e')+' ', text)
35
- # text = re.sub(r'\[GD\](.*?)\[GD\]',
36
- # lambda x: cantonese_to_ipa(x.group(1))+' ', text)
37
- # text = re.sub(r'\[EN\](.*?)\[EN\]',
38
- # lambda x: english_to_lazy_ipa2(x.group(1))+' ', text)
39
- # text = re.sub(r'\[([A-Z]{2})\](.*?)\[\1\]', lambda x: ngu_dialect_to_ipa(x.group(2), x.group(
40
- # 1)).replace('ʣ', 'dz').replace('ʥ', '').replace('ʦ', 'ts').replace('ʨ', '')+' ', text)
41
- # text = re.sub(r'\s+$', '', text)
42
- # text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
43
- # return text
 
 
 
 
 
 
 
1
  import re
 
 
 
 
 
2
 
3
 
4
  def japanese_cleaners(text):
5
+ from text.japanese import japanese_to_romaji_with_accent
6
  text = japanese_to_romaji_with_accent(text)
7
  text = re.sub(r'([A-Za-z])$', r'\1.', text)
8
  return text
 
12
  return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…')
13
 
14
 
15
+ def korean_cleaners(text):
16
+ '''Pipeline for Korean text'''
17
+ from text.korean import latin_to_hangul, number_to_hangul, divide_hangul
18
+ text = latin_to_hangul(text)
19
+ text = number_to_hangul(text)
20
+ text = divide_hangul(text)
21
+ text = re.sub(r'([\u3131-\u3163])$', r'\1.', text)
22
+ return text
23
+
24
+
25
+ def chinese_cleaners(text):
26
+ '''Pipeline for Chinese text'''
27
+ from text.mandarin import number_to_chinese, chinese_to_bopomofo, latin_to_bopomofo
28
+ text = number_to_chinese(text)
29
+ text = chinese_to_bopomofo(text)
30
+ text = latin_to_bopomofo(text)
31
+ text = re.sub(r'([ˉˊˇˋ˙])$', r'\1。', text)
32
+ return text
33
+
34
+
35
+ def zh_ja_mixture_cleaners(text):
36
+ from text.mandarin import chinese_to_romaji
37
+ from text.japanese import japanese_to_romaji_with_accent
38
+ text = re.sub(r'\[ZH\](.*?)\[ZH\]',
39
+ lambda x: chinese_to_romaji(x.group(1))+' ', text)
40
+ text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_romaji_with_accent(
41
+ x.group(1)).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…')+' ', text)
42
+ text = re.sub(r'\s+$', '', text)
43
+ text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
44
+ return text
45
+
46
+
47
+ def sanskrit_cleaners(text):
48
+ text = text.replace('॥', '।').replace('ॐ', 'ओम्')
49
+ if text[-1] != '।':
50
+ text += ' ।'
51
+ return text
52
+
53
+
54
+ def cjks_cleaners(text):
55
+ from text.mandarin import chinese_to_lazy_ipa
56
+ from text.japanese import japanese_to_ipa
57
+ from text.korean import korean_to_lazy_ipa
58
+ from text.sanskrit import devanagari_to_ipa
59
+ from text.english import english_to_lazy_ipa
60
+ text = re.sub(r'\[ZH\](.*?)\[ZH\]',
61
+ lambda x: chinese_to_lazy_ipa(x.group(1))+' ', text)
62
+ text = re.sub(r'\[JA\](.*?)\[JA\]',
63
+ lambda x: japanese_to_ipa(x.group(1))+' ', text)
64
+ text = re.sub(r'\[KO\](.*?)\[KO\]',
65
+ lambda x: korean_to_lazy_ipa(x.group(1))+' ', text)
66
+ text = re.sub(r'\[SA\](.*?)\[SA\]',
67
+ lambda x: devanagari_to_ipa(x.group(1))+' ', text)
68
+ text = re.sub(r'\[EN\](.*?)\[EN\]',
69
+ lambda x: english_to_lazy_ipa(x.group(1))+' ', text)
70
+ text = re.sub(r'\s+$', '', text)
71
+ text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
72
+ return text
73
+
74
+
75
+ def cjke_cleaners(text):
76
+ from text.mandarin import chinese_to_lazy_ipa
77
+ from text.japanese import japanese_to_ipa
78
+ from text.korean import korean_to_ipa
79
+ from text.english import english_to_ipa2
80
+ text = re.sub(r'\[ZH\](.*?)\[ZH\]', lambda x: chinese_to_lazy_ipa(x.group(1)).replace(
81
+ 'ʧ', 'tʃ').replace('ʦ', 'ts').replace('ɥan', 'ɥæn')+' ', text)
82
+ text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_ipa(x.group(1)).replace('ʧ', 'tʃ').replace(
83
+ 'ʦ', 'ts').replace('ɥan', 'ɥæn').replace('ʥ', 'dz')+' ', text)
84
+ text = re.sub(r'\[KO\](.*?)\[KO\]',
85
+ lambda x: korean_to_ipa(x.group(1))+' ', text)
86
+ text = re.sub(r'\[EN\](.*?)\[EN\]', lambda x: english_to_ipa2(x.group(1)).replace('ɑ', 'a').replace(
87
+ 'ɔ', 'o').replace('ɛ', 'e').replace('ɪ', 'i').replace('ʊ', 'u')+' ', text)
88
+ text = re.sub(r'\s+$', '', text)
89
+ text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
90
+ return text
91
+
92
+
93
+ def cjke_cleaners2(text):
94
+ from text.mandarin import chinese_to_ipa
95
+ from text.japanese import japanese_to_ipa2
96
+ from text.korean import korean_to_ipa
97
+ from text.english import english_to_ipa2
98
+ text = re.sub(r'\[ZH\](.*?)\[ZH\]',
99
+ lambda x: chinese_to_ipa(x.group(1))+' ', text)
100
+ text = re.sub(r'\[JA\](.*?)\[JA\]',
101
+ lambda x: japanese_to_ipa2(x.group(1))+' ', text)
102
+ text = re.sub(r'\[KO\](.*?)\[KO\]',
103
+ lambda x: korean_to_ipa(x.group(1))+' ', text)
104
+ text = re.sub(r'\[EN\](.*?)\[EN\]',
105
+ lambda x: english_to_ipa2(x.group(1))+' ', text)
106
+ text = re.sub(r'\s+$', '', text)
107
+ text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
108
+ return text
109
+
110
 
111
+ def thai_cleaners(text):
112
+ from text.thai import num_to_thai, latin_to_thai
113
+ text = num_to_thai(text)
114
+ text = latin_to_thai(text)
115
+ return text
116
 
117
 
118
+ def shanghainese_cleaners(text):
119
+ from text.shanghainese import shanghainese_to_ipa
120
+ text = shanghainese_to_ipa(text)
121
+ text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
122
+ return text
123
 
124
 
125
+ def chinese_dialect_cleaners(text):
126
+ from text.mandarin import chinese_to_ipa2
127
+ from text.japanese import japanese_to_ipa3
128
+ from text.shanghainese import shanghainese_to_ipa
129
+ from text.cantonese import cantonese_to_ipa
130
+ from text.english import english_to_lazy_ipa2
131
+ from text.ngu_dialect import ngu_dialect_to_ipa
132
+ text = re.sub(r'\[ZH\](.*?)\[ZH\]',
133
+ lambda x: chinese_to_ipa2(x.group(1))+' ', text)
134
+ text = re.sub(r'\[JA\](.*?)\[JA\]',
135
+ lambda x: japanese_to_ipa3(x.group(1)).replace('Q', 'ʔ')+' ', text)
136
+ text = re.sub(r'\[SH\](.*?)\[SH\]', lambda x: shanghainese_to_ipa(x.group(1)).replace('1', '˥˧').replace('5',
137
+ '˧˧˦').replace('6', '˩˩˧').replace('7', '˥').replace('8', '˩˨').replace('ᴀ', 'ɐ').replace('ᴇ', 'e')+' ', text)
138
+ text = re.sub(r'\[GD\](.*?)\[GD\]',
139
+ lambda x: cantonese_to_ipa(x.group(1))+' ', text)
140
+ text = re.sub(r'\[EN\](.*?)\[EN\]',
141
+ lambda x: english_to_lazy_ipa2(x.group(1))+' ', text)
142
+ text = re.sub(r'\[([A-Z]{2})\](.*?)\[\1\]', lambda x: ngu_dialect_to_ipa(x.group(2), x.group(
143
+ 1)).replace('ʣ', 'dz').replace('ʥ', 'dʑ').replace('ʦ', 'ts').replace('ʨ', 'tɕ')+' ', text)
144
+ text = re.sub(r'\s+$', '', text)
145
+ text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
146
+ return text
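Note: the rewritten cleaners import their language modules lazily inside each function, so only the pipelines actually used pull in their dependencies, and the new multilingual cleaners split the input on paired language tags. A hedged input-format sketch (the sentence is a placeholder; cjke_cleaners2 imports the Mandarin, Japanese, Korean and English modules, so those dependencies must be present):

from text.cleaners import cjke_cleaners2

text = "[ZH]你好[ZH][JA]こんにちは[JA][EN]hello[EN]"
print(cjke_cleaners2(text))   # each tagged span is converted to IPA-like symbols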
text/mandarin.py CHANGED
@@ -6,6 +6,10 @@ import jieba
6
  import cn2an
7
  import logging
8
 
 
 
 
 
9
 
10
  # List of (Latin alphabet, bopomofo) pairs:
11
  _latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
 
6
  import cn2an
7
  import logging
8
 
9
+ logging.getLogger('jieba').setLevel(logging.WARNING)
10
+ jieba.set_dictionary(os.path.dirname(sys.argv[0])+'/jieba/dict.txt')
11
+ jieba.initialize()
12
+
13
 
14
  # List of (Latin alphabet, bopomofo) pairs:
15
  _latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
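Note: the new module-level block points jieba at the bundled jieba/dict.txt, resolved relative to sys.argv[0], and calls jieba.initialize() eagerly so the first conversion does not pay the lazy dictionary-loading cost. That path resolution assumes the entry-point script sits next to the jieba/ folder; where that assumption does not hold, a module-relative path is an alternative (a sketch, not part of this commit):

import os
import jieba

# resolve the bundled dictionary relative to this file instead of sys.argv[0]
dict_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "jieba", "dict.txt")
jieba.set_dictionary(dict_path)
jieba.initialize()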
text/thai.py ADDED
@@ -0,0 +1,44 @@
1
+ import re
2
+ from num_thai.thainumbers import NumThai
3
+
4
+
5
+ num = NumThai()
6
+
7
+ # List of (Latin alphabet, Thai) pairs:
8
+ _latin_to_thai = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
9
+ ('a', 'เอ'),
10
+ ('b','บี'),
11
+ ('c','ซี'),
12
+ ('d','ดี'),
13
+ ('e','อี'),
14
+ ('f','เอฟ'),
15
+ ('g','จี'),
16
+ ('h','เอช'),
17
+ ('i','ไอ'),
18
+ ('j','เจ'),
19
+ ('k','เค'),
20
+ ('l','แอล'),
21
+ ('m','เอ็ม'),
22
+ ('n','เอ็น'),
23
+ ('o','โอ'),
24
+ ('p','พี'),
25
+ ('q','คิว'),
26
+ ('r','แอร์'),
27
+ ('s','เอส'),
28
+ ('t','ที'),
29
+ ('u','ยู'),
30
+ ('v','วี'),
31
+ ('w','ดับเบิลยู'),
32
+ ('x','เอ็กซ์'),
33
+ ('y','วาย'),
34
+ ('z','ซี')
35
+ ]]
36
+
37
+
38
+ def num_to_thai(text):
39
+ return re.sub(r'(?:\d+(?:,?\d+)?)+(?:\.\d+(?:,?\d+)?)?', lambda x: ''.join(num.NumberToTextThai(float(x.group(0).replace(',', '')))), text)
40
+
41
+ def latin_to_thai(text):
42
+ for regex, replacement in _latin_to_thai:
43
+ text = re.sub(regex, replacement, text)
44
+ return text
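Note: text/thai.py spells out digit groups with num_thai and transliterates Latin letters to their Thai names. A hedged usage sketch (requires the num_thai package; the strings are placeholders):

from text.thai import num_to_thai, latin_to_thai

print(num_to_thai("1,250"))    # digits become Thai number words
print(latin_to_thai("vits"))   # Latin letters become Thai letter names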
utils.py CHANGED
@@ -1,229 +1,8 @@
1
- import os
2
- import glob
3
- import sys
4
- import argparse
5
  import logging
6
- import json
7
- import subprocess
8
- import numpy as np
9
- from scipy.io.wavfile import read
10
- import torch
11
-
12
- MATPLOTLIB_FLAG = False
13
-
14
- logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
15
- logger = logging
16
-
17
-
18
- def load_checkpoint(checkpoint_path, model, optimizer=None):
19
- assert os.path.isfile(checkpoint_path)
20
- checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
21
- iteration = checkpoint_dict['iteration']
22
- learning_rate = checkpoint_dict['learning_rate']
23
- if optimizer is not None:
24
- optimizer.load_state_dict(checkpoint_dict['optimizer'])
25
- saved_state_dict = checkpoint_dict['model']
26
- if hasattr(model, 'module'):
27
- state_dict = model.module.state_dict()
28
- else:
29
- state_dict = model.state_dict()
30
- new_state_dict= {}
31
- for k, v in state_dict.items():
32
- try:
33
- new_state_dict[k] = saved_state_dict[k]
34
- except:
35
- logger.info("%s is not in the checkpoint" % k)
36
- new_state_dict[k] = v
37
- if hasattr(model, 'module'):
38
- model.module.load_state_dict(new_state_dict)
39
- else:
40
- model.load_state_dict(new_state_dict)
41
- logger.info("Loaded checkpoint '{}' (iteration {})" .format(
42
- checkpoint_path, iteration))
43
- return model, optimizer, learning_rate, iteration
44
-
45
-
46
- def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
47
- logger.info("Saving model and optimizer state at iteration {} to {}".format(
48
- iteration, checkpoint_path))
49
- if hasattr(model, 'module'):
50
- state_dict = model.module.state_dict()
51
- else:
52
- state_dict = model.state_dict()
53
- torch.save({'model': state_dict,
54
- 'iteration': iteration,
55
- 'optimizer': optimizer.state_dict(),
56
- 'learning_rate': learning_rate}, checkpoint_path)
57
-
58
-
59
- def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
60
- for k, v in scalars.items():
61
- writer.add_scalar(k, v, global_step)
62
- for k, v in histograms.items():
63
- writer.add_histogram(k, v, global_step)
64
- for k, v in images.items():
65
- writer.add_image(k, v, global_step, dataformats='HWC')
66
- for k, v in audios.items():
67
- writer.add_audio(k, v, global_step, audio_sampling_rate)
68
-
69
-
70
- def latest_checkpoint_path(dir_path, regex="G_*.pth"):
71
- f_list = glob.glob(os.path.join(dir_path, regex))
72
- f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
73
- x = f_list[-1]
74
- print(x)
75
- return x
76
-
77
-
78
- def plot_spectrogram_to_numpy(spectrogram):
79
- global MATPLOTLIB_FLAG
80
- if not MATPLOTLIB_FLAG:
81
- import matplotlib
82
- matplotlib.use("Agg")
83
- MATPLOTLIB_FLAG = True
84
- mpl_logger = logging.getLogger('matplotlib')
85
- mpl_logger.setLevel(logging.WARNING)
86
- import matplotlib.pylab as plt
87
- import numpy as np
88
-
89
- fig, ax = plt.subplots(figsize=(10,2))
90
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
91
- interpolation='none')
92
- plt.colorbar(im, ax=ax)
93
- plt.xlabel("Frames")
94
- plt.ylabel("Channels")
95
- plt.tight_layout()
96
-
97
- fig.canvas.draw()
98
- data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
99
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
100
- plt.close()
101
- return data
102
-
103
-
104
- def plot_alignment_to_numpy(alignment, info=None):
105
- global MATPLOTLIB_FLAG
106
- if not MATPLOTLIB_FLAG:
107
- import matplotlib
108
- matplotlib.use("Agg")
109
- MATPLOTLIB_FLAG = True
110
- mpl_logger = logging.getLogger('matplotlib')
111
- mpl_logger.setLevel(logging.WARNING)
112
- import matplotlib.pylab as plt
113
- import numpy as np
114
-
115
- fig, ax = plt.subplots(figsize=(6, 4))
116
- im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
117
- interpolation='none')
118
- fig.colorbar(im, ax=ax)
119
- xlabel = 'Decoder timestep'
120
- if info is not None:
121
- xlabel += '\n\n' + info
122
- plt.xlabel(xlabel)
123
- plt.ylabel('Encoder timestep')
124
- plt.tight_layout()
125
-
126
- fig.canvas.draw()
127
- data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
128
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
129
- plt.close()
130
- return data
131
-
132
-
133
- def load_wav_to_torch(full_path):
134
- sampling_rate, data = read(full_path)
135
- return torch.FloatTensor(data.astype(np.float32)), sampling_rate
136
-
137
-
138
- def load_filepaths_and_text(filename, split="|"):
139
- with open(filename, encoding='utf-8') as f:
140
- filepaths_and_text = [line.strip().split(split) for line in f]
141
- return filepaths_and_text
142
-
143
-
144
- def get_hparams(init=True):
145
- parser = argparse.ArgumentParser()
146
- parser.add_argument('-c', '--config', type=str, default="./configs/uma87.json",
147
- help='JSON file for configuration')
148
- parser.add_argument('-m', '--model', type=str, default="./pretrained_models/uma_0epoch.pth",
149
- help='Model name')
150
-
151
- args = parser.parse_args()
152
- model_dir = os.path.join("../drive/MyDrive", args.model)
153
-
154
- if not os.path.exists(model_dir):
155
- os.makedirs(model_dir)
156
-
157
- config_path = args.config
158
- config_save_path = os.path.join(model_dir, "config.json")
159
- if init:
160
- with open(config_path, "r") as f:
161
- data = f.read()
162
- with open(config_save_path, "w") as f:
163
- f.write(data)
164
- else:
165
- with open(config_save_path, "r") as f:
166
- data = f.read()
167
- config = json.loads(data)
168
-
169
- hparams = HParams(**config)
170
- hparams.model_dir = model_dir
171
- return hparams
172
-
173
-
174
- def get_hparams_from_dir(model_dir):
175
- config_save_path = os.path.join(model_dir, "config.json")
176
- with open(config_save_path, "r") as f:
177
- data = f.read()
178
- config = json.loads(data)
179
-
180
- hparams =HParams(**config)
181
- hparams.model_dir = model_dir
182
- return hparams
183
-
184
-
185
- def get_hparams_from_file(config_path):
186
- with open(config_path, "r") as f:
187
- data = f.read()
188
- config = json.loads(data)
189
-
190
- hparams =HParams(**config)
191
- return hparams
192
-
193
-
194
- def check_git_hash(model_dir):
195
- source_dir = os.path.dirname(os.path.realpath(__file__))
196
- if not os.path.exists(os.path.join(source_dir, ".git")):
197
- logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
198
- source_dir
199
- ))
200
- return
201
-
202
- cur_hash = subprocess.getoutput("git rev-parse HEAD")
203
-
204
- path = os.path.join(model_dir, "githash")
205
- if os.path.exists(path):
206
- saved_hash = open(path).read()
207
- if saved_hash != cur_hash:
208
- logger.warn("git hash values are different. {}(saved) != {}(current)".format(
209
- saved_hash[:8], cur_hash[:8]))
210
- else:
211
- open(path, "w").write(cur_hash)
212
-
213
-
214
- def get_logger(model_dir, filename="train.log"):
215
- global logger
216
- logger = logging.getLogger(os.path.basename(model_dir))
217
- logger.setLevel(logging.DEBUG)
218
-
219
- formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
220
- if not os.path.exists(model_dir):
221
- os.makedirs(model_dir)
222
- h = logging.FileHandler(os.path.join(model_dir, filename))
223
- h.setLevel(logging.DEBUG)
224
- h.setFormatter(formatter)
225
- logger.addHandler(h)
226
- return logger
227
 
228
 
229
  class HParams():
@@ -232,7 +11,7 @@ class HParams():
232
  if type(v) == dict:
233
  v = HParams(**v)
234
  self[k] = v
235
-
236
  def keys(self):
237
  return self.__dict__.keys()
238
 
@@ -256,3 +35,41 @@ class HParams():
256
 
257
  def __repr__(self):
258
  return self.__dict__.__repr__()
1
  import logging
2
+ from json import loads
3
+ from torch import load, FloatTensor
4
+ from numpy import float32
5
+ import librosa
 
6
 
7
 
8
  class HParams():
 
11
  if type(v) == dict:
12
  v = HParams(**v)
13
  self[k] = v
14
+
15
  def keys(self):
16
  return self.__dict__.keys()
17
 
 
35
 
36
  def __repr__(self):
37
  return self.__dict__.__repr__()
38
+
39
+
40
+ def load_checkpoint(checkpoint_path, model):
41
+ checkpoint_dict = load(checkpoint_path, map_location='cpu')
42
+ iteration = checkpoint_dict['iteration']
43
+ saved_state_dict = checkpoint_dict['model']
44
+ if hasattr(model, 'module'):
45
+ state_dict = model.module.state_dict()
46
+ else:
47
+ state_dict = model.state_dict()
48
+ new_state_dict= {}
49
+ for k, v in state_dict.items():
50
+ try:
51
+ new_state_dict[k] = saved_state_dict[k]
52
+ except:
53
+ logging.info("%s is not in the checkpoint" % k)
54
+ new_state_dict[k] = v
55
+ if hasattr(model, 'module'):
56
+ model.module.load_state_dict(new_state_dict)
57
+ else:
58
+ model.load_state_dict(new_state_dict)
59
+ logging.info("Loaded checkpoint '{}' (iteration {})" .format(
60
+ checkpoint_path, iteration))
61
+ return
62
+
63
+
64
+ def get_hparams_from_file(config_path):
65
+ with open(config_path, "r") as f:
66
+ data = f.read()
67
+ config = loads(data)
68
+
69
+ hparams = HParams(**config)
70
+ return hparams
71
+
72
+
73
+ def load_audio_to_torch(full_path, target_sampling_rate):
74
+ audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True)
75
+ return FloatTensor(audio.astype(float32))
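Note: the slimmed-down utils.py keeps only what inference needs: HParams, a load_checkpoint() that restores model weights without optimizer state, get_hparams_from_file(), and librosa-based audio loading resampled to the target rate. A hedged end-to-end sketch; the paths are placeholders, the model construction is elided, and the data.sampling_rate field is assumed to exist in the config:

import utils

hps = utils.get_hparams_from_file("configs/config.json")          # placeholder path
# net_g = SynthesizerTrn(...)  -- build the model from hps first (elided here)
utils.load_checkpoint("pretrained_models/G_latest.pth", net_g)    # loads weights in place, returns None
wav = utils.load_audio_to_torch("sample.wav", hps.data.sampling_rate)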