innnky committed on
Commit 2478285
1 Parent(s): 59192f8
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2021 Jaehyeon Kim
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
app.py ADDED
@@ -0,0 +1,104 @@
+ import gradio as gr
+ import os
+ # Build the monotonic_align Cython extension in place before the model code imports it.
+ os.system('cd monotonic_align && python setup.py build_ext --inplace && cd ..')
+
+ import logging
+
+ numba_logger = logging.getLogger('numba')
+ numba_logger.setLevel(logging.WARNING)
+ import librosa
+ import torch
+ import commons
+ import utils
+ from models import SynthesizerTrn
+ from text.symbols import symbols
+ from text import text_to_sequence
+ import numpy as np
+ import soundfile as sf
+ from preprocess_wave import FeatureInput
+
+ def resize2d(x, target_len):
+     source = np.array(x)
+     source[source < 0.001] = np.nan  # mask unvoiced frames so they are not interpolated
+     target = np.interp(np.arange(0, len(source) * target_len, len(source)) / target_len,
+                        np.arange(0, len(source)), source)
+     res = np.nan_to_num(target)  # masked frames come back as 0
+     return res
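+ # A minimal usage sketch (hypothetical values): resize2d([100.0, 0.0, 110.0], 6)
+ # maps a 3-frame f0 track onto 6 frames. The unvoiced middle frame is NaN during
+ # interpolation, so the outputs it touches become NaN and are written back as 0
+ # instead of a misleading 0/voiced average.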
+
+ def transcribe(path, length, transform):
+     featur_pit = featureInput.compute_f0(path)
+     featur_pit = featur_pit * 2 ** (transform / 12)
+     featur_pit = resize2d(featur_pit, length)
+     coarse_pit = featureInput.coarse_f0(featur_pit)
+     return coarse_pit
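+ # 2 ** (transform / 12) is an equal-tempered semitone shift: transform = 12
+ # doubles f0 (one octave up), transform = -12 halves it, and transform = 0
+ # leaves the pitch untouched.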
+
+ def get_text(text, hps):
+     text_norm = text_to_sequence(text, hps.data.text_cleaners)
+     if hps.data.add_blank:
+         text_norm = commons.intersperse(text_norm, 0)
+     text_norm = torch.LongTensor(text_norm)
+     print(text_norm.shape)
+     return text_norm
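+ # Note: get_text is retained from the upstream VITS text-to-speech pipeline but
+ # is unused below; vc_fn feeds HuBERT soft units instead of phoneme sequences.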
+
+
+ hps_ms = utils.get_hparams_from_file("configs/vctk_base.json")
+ net_g_ms = SynthesizerTrn(
+     len(symbols),
+     hps_ms.data.filter_length // 2 + 1,
+     hps_ms.train.segment_size // hps_ms.data.hop_length,
+     n_speakers=hps_ms.data.n_speakers,
+     **hps_ms.model)
+
+ featureInput = FeatureInput(hps_ms.data.sampling_rate, hps_ms.data.hop_length)
+
+
+ hubert = torch.hub.load("bshall/hubert:main", "hubert_soft")
+
+ _ = utils.load_checkpoint("G_312000.pth", net_g_ms, None)
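+ # Assumes the pretrained generator checkpoint G_312000.pth is present in the
+ # working directory; torch.hub fetches the soft-unit HuBERT encoder on first run.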
+
+ def vc_fn(input_audio, vc_transform):
+     if input_audio is None:
+         return "You need to upload an audio file", None
+     sampling_rate, audio = input_audio
+     # print(audio.shape, sampling_rate)
+     duration = audio.shape[0] / sampling_rate
+     if duration > 45:
+         return "Error: Audio is too long", None
+     # Gradio supplies integer PCM; normalize to [-1, 1] float
+     audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
+     if len(audio.shape) > 1:
+         audio = librosa.to_mono(audio.transpose(1, 0))
+     if sampling_rate != 16000:
+         audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
+
+     source = torch.FloatTensor(audio).unsqueeze(0).unsqueeze(0)
+     print(source.shape)
+     with torch.inference_mode():
+         units = hubert.units(source)
+     soft = units.squeeze(0).numpy()
+     audio22050 = librosa.resample(audio, orig_sr=16000, target_sr=22050)
+     sf.write("temp.wav", audio22050, 22050)
+     pitch = transcribe("temp.wav", soft.shape[0], vc_transform)
+     pitch = torch.LongTensor(pitch).unsqueeze(0)
+     sid = torch.LongTensor([0])
+     stn_tst = torch.FloatTensor(soft)
+     with torch.no_grad():
+         x_tst = stn_tst.unsqueeze(0)
+         x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
+         audio = net_g_ms.infer(x_tst, x_tst_lengths, pitch=pitch, sid=sid, noise_scale=0.4,
+                                noise_scale_w=0.1, length_scale=1)[0][0, 0].data.float().numpy()
+
+     return "Success", (hps_ms.data.sampling_rate, audio)
+
+
+
+ app = gr.Blocks()
+ with app:
+     with gr.Tabs():
+         with gr.TabItem("Basic"):
+             vc_input3 = gr.Audio(label="Input Audio (45s limit)")
+             vc_transform = gr.Number(label="transform (semitones)", value=1.0)
+             vc_submit = gr.Button("Convert", variant="primary")
+             vc_output1 = gr.Textbox(label="Output Message")
+             vc_output2 = gr.Audio(label="Output Audio")
+             vc_submit.click(vc_fn, [vc_input3, vc_transform], [vc_output1, vc_output2])
+
+     app.launch()
attentions.py ADDED
@@ -0,0 +1,303 @@
+ import copy
+ import math
+ import numpy as np
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+
+ import commons
+ import modules
+ from modules import LayerNorm
+
+
+ class Encoder(nn.Module):
+     def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
+         super().__init__()
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.window_size = window_size
+
+         self.drop = nn.Dropout(p_dropout)
+         self.attn_layers = nn.ModuleList()
+         self.norm_layers_1 = nn.ModuleList()
+         self.ffn_layers = nn.ModuleList()
+         self.norm_layers_2 = nn.ModuleList()
+         for i in range(self.n_layers):
+             self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
+             self.norm_layers_1.append(LayerNorm(hidden_channels))
+             self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
+             self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+     def forward(self, x, x_mask):
+         attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+         x = x * x_mask
+         for i in range(self.n_layers):
+             y = self.attn_layers[i](x, x, attn_mask)
+             y = self.drop(y)
+             x = self.norm_layers_1[i](x + y)
+
+             y = self.ffn_layers[i](x, x_mask)
+             y = self.drop(y)
+             x = self.norm_layers_2[i](x + y)
+         x = x * x_mask
+         return x
+
+
+ class Decoder(nn.Module):
+     def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
+         super().__init__()
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.proximal_bias = proximal_bias
+         self.proximal_init = proximal_init
+
+         self.drop = nn.Dropout(p_dropout)
+         self.self_attn_layers = nn.ModuleList()
+         self.norm_layers_0 = nn.ModuleList()
+         self.encdec_attn_layers = nn.ModuleList()
+         self.norm_layers_1 = nn.ModuleList()
+         self.ffn_layers = nn.ModuleList()
+         self.norm_layers_2 = nn.ModuleList()
+         for i in range(self.n_layers):
+             self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
+             self.norm_layers_0.append(LayerNorm(hidden_channels))
+             self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
+             self.norm_layers_1.append(LayerNorm(hidden_channels))
+             self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
+             self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+     def forward(self, x, x_mask, h, h_mask):
+         """
+         x: decoder input
+         h: encoder output
+         """
+         self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
+         encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+         x = x * x_mask
+         for i in range(self.n_layers):
+             y = self.self_attn_layers[i](x, x, self_attn_mask)
+             y = self.drop(y)
+             x = self.norm_layers_0[i](x + y)
+
+             y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
+             y = self.drop(y)
+             x = self.norm_layers_1[i](x + y)
+
+             y = self.ffn_layers[i](x, x_mask)
+             y = self.drop(y)
+             x = self.norm_layers_2[i](x + y)
+         x = x * x_mask
+         return x
+
+
+ class MultiHeadAttention(nn.Module):
+     def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
+         super().__init__()
+         assert channels % n_heads == 0
+
+         self.channels = channels
+         self.out_channels = out_channels
+         self.n_heads = n_heads
+         self.p_dropout = p_dropout
+         self.window_size = window_size
+         self.heads_share = heads_share
+         self.block_length = block_length
+         self.proximal_bias = proximal_bias
+         self.proximal_init = proximal_init
+         self.attn = None
+
+         self.k_channels = channels // n_heads
+         self.conv_q = nn.Conv1d(channels, channels, 1)
+         self.conv_k = nn.Conv1d(channels, channels, 1)
+         self.conv_v = nn.Conv1d(channels, channels, 1)
+         self.conv_o = nn.Conv1d(channels, out_channels, 1)
+         self.drop = nn.Dropout(p_dropout)
+
+         if window_size is not None:
+             n_heads_rel = 1 if heads_share else n_heads
+             rel_stddev = self.k_channels**-0.5
+             self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
+             self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
+
+         nn.init.xavier_uniform_(self.conv_q.weight)
+         nn.init.xavier_uniform_(self.conv_k.weight)
+         nn.init.xavier_uniform_(self.conv_v.weight)
+         if proximal_init:
+             with torch.no_grad():
+                 self.conv_k.weight.copy_(self.conv_q.weight)
+                 self.conv_k.bias.copy_(self.conv_q.bias)
+
+     def forward(self, x, c, attn_mask=None):
+         q = self.conv_q(x)
+         k = self.conv_k(c)
+         v = self.conv_v(c)
+
+         x, self.attn = self.attention(q, k, v, mask=attn_mask)
+
+         x = self.conv_o(x)
+         return x
+
+     def attention(self, query, key, value, mask=None):
+         # reshape [b, d, t] -> [b, n_h, t, d_k]
+         b, d, t_s, t_t = (*key.size(), query.size(2))
+         query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
+         key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+         value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+
+         scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
+         if self.window_size is not None:
+             assert t_s == t_t, "Relative attention is only available for self-attention."
+             key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
+             rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
+             scores_local = self._relative_position_to_absolute_position(rel_logits)
+             scores = scores + scores_local
+         if self.proximal_bias:
+             assert t_s == t_t, "Proximal bias is only available for self-attention."
+             scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
+         if mask is not None:
+             scores = scores.masked_fill(mask == 0, -1e4)
+             if self.block_length is not None:
+                 assert t_s == t_t, "Local attention is only available for self-attention."
+                 block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
+                 scores = scores.masked_fill(block_mask == 0, -1e4)
+         p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
+         p_attn = self.drop(p_attn)
+         output = torch.matmul(p_attn, value)
+         if self.window_size is not None:
+             relative_weights = self._absolute_position_to_relative_position(p_attn)
+             value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
+             output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
+         output = output.transpose(2, 3).contiguous().view(b, d, t_t)  # [b, n_h, t_t, d_k] -> [b, d, t_t]
+         return output, p_attn
+
+     def _matmul_with_relative_values(self, x, y):
+         """
+         x: [b, h, l, m]
+         y: [h or 1, m, d]
+         ret: [b, h, l, d]
+         """
+         ret = torch.matmul(x, y.unsqueeze(0))
+         return ret
+
+     def _matmul_with_relative_keys(self, x, y):
+         """
+         x: [b, h, l, d]
+         y: [h or 1, m, d]
+         ret: [b, h, l, m]
+         """
+         ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
+         return ret
+
+     def _get_relative_embeddings(self, relative_embeddings, length):
+         max_relative_position = 2 * self.window_size + 1
+         # Pad first before slice to avoid using cond ops.
+         pad_length = max(length - (self.window_size + 1), 0)
+         slice_start_position = max((self.window_size + 1) - length, 0)
+         slice_end_position = slice_start_position + 2 * length - 1
+         if pad_length > 0:
+             padded_relative_embeddings = F.pad(
+                 relative_embeddings,
+                 commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
+         else:
+             padded_relative_embeddings = relative_embeddings
+         used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
+         return used_relative_embeddings
+
+     def _relative_position_to_absolute_position(self, x):
+         """
+         x: [b, h, l, 2*l-1]
+         ret: [b, h, l, l]
+         """
+         batch, heads, length, _ = x.size()
+         # Concat columns of pad to shift from relative to absolute indexing.
+         x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
+
+         # Concat extra elements so as to add up to shape (len+1, 2*len-1).
+         x_flat = x.view([batch, heads, length * 2 * length])
+         x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
+
+         # Reshape and slice out the padded elements.
+         x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
+         return x_final
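+     # Worked example for length l = 3: each query holds logits at relative
+     # offsets -2..2 (width 2l-1 = 5). Padding one column gives width 6,
+     # flattening gives 18 elements, padding l-1 = 2 more gives 20, and viewing
+     # as [l+1, 2l-1] = [4, 5] skews each row by one, so the final slice
+     # [:l, l-1:] leaves the logits aligned at absolute key positions 0..2.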
+
+     def _absolute_position_to_relative_position(self, x):
+         """
+         x: [b, h, l, l]
+         ret: [b, h, l, 2*l-1]
+         """
+         batch, heads, length, _ = x.size()
+         # pad along column
+         x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
+         x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
+         # add 0's at the beginning that will skew the elements after reshape
+         x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
+         x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
+         return x_final
+
+     def _attention_bias_proximal(self, length):
+         """Bias for self-attention to encourage attending to close positions.
+         Args:
+           length: an integer scalar.
+         Returns:
+           a Tensor with shape [1, 1, length, length]
+         """
+         r = torch.arange(length, dtype=torch.float32)
+         diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
+         return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
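+     # The bias is -log(1 + |i - j|): 0 on the diagonal, about -0.69 one step
+     # away and -1.10 two steps away, so nearby frames are softly favoured
+     # without hard-masking distant ones.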
+
+
+ class FFN(nn.Module):
+     def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
+         super().__init__()
+         self.in_channels = in_channels
+         self.out_channels = out_channels
+         self.filter_channels = filter_channels
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.activation = activation
+         self.causal = causal
+
+         if causal:
+             self.padding = self._causal_padding
+         else:
+             self.padding = self._same_padding
+
+         self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
+         self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
+         self.drop = nn.Dropout(p_dropout)
+
+     def forward(self, x, x_mask):
+         x = self.conv_1(self.padding(x * x_mask))
+         if self.activation == "gelu":
+             x = x * torch.sigmoid(1.702 * x)
+         else:
+             x = torch.relu(x)
+         x = self.drop(x)
+         x = self.conv_2(self.padding(x * x_mask))
+         return x * x_mask
+
+     def _causal_padding(self, x):
+         if self.kernel_size == 1:
+             return x
+         pad_l = self.kernel_size - 1
+         pad_r = 0
+         padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+         x = F.pad(x, commons.convert_pad_shape(padding))
+         return x
+
+     def _same_padding(self, x):
+         if self.kernel_size == 1:
+             return x
+         pad_l = (self.kernel_size - 1) // 2
+         pad_r = self.kernel_size // 2
+         padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+         x = F.pad(x, commons.convert_pad_shape(padding))
+         return x
commons.py ADDED
@@ -0,0 +1,161 @@
+ import math
+ import numpy as np
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+
+
+ def init_weights(m, mean=0.0, std=0.01):
+     classname = m.__class__.__name__
+     if classname.find("Conv") != -1:
+         m.weight.data.normal_(mean, std)
+
+
+ def get_padding(kernel_size, dilation=1):
+     return int((kernel_size * dilation - dilation) / 2)
+
+
+ def convert_pad_shape(pad_shape):
+     l = pad_shape[::-1]
+     pad_shape = [item for sublist in l for item in sublist]
+     return pad_shape
+
+
+ def intersperse(lst, item):
+     result = [item] * (len(lst) * 2 + 1)
+     result[1::2] = lst
+     return result
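+ # Example: intersperse([1, 2, 3], 0) -> [0, 1, 0, 2, 0, 3, 0]; used to place a
+ # blank token between symbols when hps.data.add_blank is set.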
+
+
+ def kl_divergence(m_p, logs_p, m_q, logs_q):
+     """KL(P||Q)"""
+     kl = (logs_q - logs_p) - 0.5
+     kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
+     return kl
+
+
+ def rand_gumbel(shape):
+     """Sample from the Gumbel distribution, protect from overflows."""
+     uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
+     return -torch.log(-torch.log(uniform_samples))
+
+
+ def rand_gumbel_like(x):
+     g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
+     return g
+
+
+ def slice_segments(x, ids_str, segment_size=4):
+     ret = torch.zeros_like(x[:, :, :segment_size])
+     for i in range(x.size(0)):
+         idx_str = ids_str[i]
+         idx_end = idx_str + segment_size
+         ret[i] = x[i, :, idx_str:idx_end]
+     return ret
+
+
+ def rand_slice_segments(x, x_lengths=None, segment_size=4):
+     b, d, t = x.size()
+     if x_lengths is None:
+         x_lengths = t
+     ids_str_max = x_lengths - segment_size + 1
+     ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
+     ret = slice_segments(x, ids_str, segment_size)
+     return ret, ids_str
+
+
+ def get_timing_signal_1d(
+         length, channels, min_timescale=1.0, max_timescale=1.0e4):
+     position = torch.arange(length, dtype=torch.float)
+     num_timescales = channels // 2
+     log_timescale_increment = (
+         math.log(float(max_timescale) / float(min_timescale)) /
+         (num_timescales - 1))
+     inv_timescales = min_timescale * torch.exp(
+         torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
+     scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
+     signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
+     signal = F.pad(signal, [0, 0, 0, channels % 2])
+     signal = signal.view(1, channels, length)
+     return signal
+
+
+ def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
+     b, channels, length = x.size()
+     signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+     return x + signal.to(dtype=x.dtype, device=x.device)
+
+
+ def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
+     b, channels, length = x.size()
+     signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+     return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
+
+
+ def subsequent_mask(length):
+     mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
+     return mask
+
+
+ @torch.jit.script
+ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+     n_channels_int = n_channels[0]
+     in_act = input_a + input_b
+     t_act = torch.tanh(in_act[:, :n_channels_int, :])
+     s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+     acts = t_act * s_act
+     return acts
+
+
+ def shift_1d(x):
+     x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
+     return x
+
+
+ def sequence_mask(length, max_length=None):
+     if max_length is None:
+         max_length = length.max()
+     x = torch.arange(max_length, dtype=length.dtype, device=length.device)
+     return x.unsqueeze(0) < length.unsqueeze(1)
+
+
+ def generate_path(duration, mask):
+     """
+     duration: [b, 1, t_x]
+     mask: [b, 1, t_y, t_x]
+     """
+     device = duration.device
+
+     b, _, t_y, t_x = mask.shape
+     cum_duration = torch.cumsum(duration, -1)
+
+     cum_duration_flat = cum_duration.view(b * t_x)
+     path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
+     path = path.view(b, t_x, t_y)
+     path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
+     path = path.unsqueeze(1).transpose(2, 3) * mask
+     return path
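+ # Example: durations [2, 1, 3] cumsum to [2, 3, 6]; sequence_mask turns these
+ # into step masks and subtracting the row-shifted copy leaves a hard 0/1
+ # alignment: text step 0 covers frames 0-1, step 1 covers frame 2, and step 2
+ # covers frames 3-5.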
+
+
+ def clip_grad_value_(parameters, clip_value, norm_type=2):
+     if isinstance(parameters, torch.Tensor):
+         parameters = [parameters]
+     parameters = list(filter(lambda p: p.grad is not None, parameters))
+     norm_type = float(norm_type)
+     if clip_value is not None:
+         clip_value = float(clip_value)
+
+     total_norm = 0
+     for p in parameters:
+         param_norm = p.grad.data.norm(norm_type)
+         total_norm += param_norm.item() ** norm_type
+         if clip_value is not None:
+             p.grad.data.clamp_(min=-clip_value, max=clip_value)
+     total_norm = total_norm ** (1. / norm_type)
+     return total_norm
configs/nyarumul.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "eval_interval": 2000,
+     "seed": 1234,
+     "epochs": 10000,
+     "learning_rate": 2e-4,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "batch_size": 16,
+     "fp16_run": true,
+     "lr_decay": 0.999875,
+     "segment_size": 8192,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0
+   },
+   "data": {
+     "training_files": "/content/drive/MyDrive/SingingVC/trainmul.txt",
+     "validation_files": "/content/drive/MyDrive/SingingVC/valmul.txt",
+     "text_cleaners": ["english_cleaners2"],
+     "max_wav_value": 32768.0,
+     "sampling_rate": 22050,
+     "filter_length": 1024,
+     "hop_length": 256,
+     "win_length": 1024,
+     "n_mel_channels": 80,
+     "mel_fmin": 0.0,
+     "mel_fmax": null,
+     "add_blank": true,
+     "n_speakers": 3,
+     "cleaned_text": true
+   },
+   "model": {
+     "inter_channels": 192,
+     "hidden_channels": 256,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0.1,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3, 7, 11],
+     "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
+     "upsample_rates": [8, 8, 2, 2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [16, 16, 4, 4],
+     "n_layers_q": 3,
+     "use_spectral_norm": false,
+     "gin_channels": 256
+   }
+ }
configs/nyarusing.json ADDED
@@ -0,0 +1,52 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "eval_interval": 2000,
+     "seed": 1234,
+     "epochs": 20000,
+     "learning_rate": 2e-4,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "batch_size": 24,
+     "fp16_run": true,
+     "lr_decay": 0.999875,
+     "segment_size": 8192,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0
+   },
+   "data": {
+     "training_files": "/content/train.txt",
+     "validation_files": "/content/nyarusing/val.txt",
+     "text_cleaners": ["english_cleaners2"],
+     "max_wav_value": 32768.0,
+     "sampling_rate": 22050,
+     "filter_length": 1024,
+     "hop_length": 256,
+     "win_length": 1024,
+     "n_mel_channels": 80,
+     "mel_fmin": 0.0,
+     "mel_fmax": null,
+     "add_blank": true,
+     "n_speakers": 0,
+     "cleaned_text": true
+   },
+   "model": {
+     "inter_channels": 192,
+     "hidden_channels": 256,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0.1,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3, 7, 11],
+     "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
+     "upsample_rates": [8, 8, 2, 2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [16, 16, 4, 4],
+     "n_layers_q": 3,
+     "use_spectral_norm": false
+   }
+ }
data_utils.py ADDED
@@ -0,0 +1,413 @@
+ import time
+ import os
+ import random
+ import numpy as np
+ import torch
+ import torch.utils.data
+ import commons
+ from mel_processing import spectrogram_torch
+ from utils import load_wav_to_torch, load_filepaths_and_text
+ from text import text_to_sequence, cleaned_text_to_sequence
+
+
+ def dropout1d(myarray, ratio=0.5):
+     indices = np.random.choice(np.arange(myarray.size), replace=False,
+                                size=int(myarray.size * ratio))
+     myarray[indices] = 0
+     return myarray
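+ # Example: dropout1d(np.arange(1, 7), 0.5) zeroes int(6 * 0.5) = 3 randomly
+ # chosen entries in place, e.g. array([1, 0, 3, 0, 5, 0]).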
+
+
+ class TextAudioLoader(torch.utils.data.Dataset):
+     """
+     1) loads audio, text pairs
+     2) loads pre-computed soft-unit features (.npy) in place of a text sequence
+     3) computes spectrograms from audio files.
+     """
+
+     def __init__(self, audiopaths_and_text, hparams):
+         self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
+         self.text_cleaners = hparams.text_cleaners
+         self.max_wav_value = hparams.max_wav_value
+         self.sampling_rate = hparams.sampling_rate
+         self.filter_length = hparams.filter_length
+         self.hop_length = hparams.hop_length
+         self.win_length = hparams.win_length
+
+         self.cleaned_text = getattr(hparams, "cleaned_text", False)
+
+         self.add_blank = hparams.add_blank
+         self.min_text_len = getattr(hparams, "min_text_len", 1)
+         self.max_text_len = getattr(hparams, "max_text_len", 190)
+
+         random.seed(1234)
+         random.shuffle(self.audiopaths_and_text)
+         self._filter()
+
+     def _filter(self):
+         """
+         Filter text & store spec lengths
+         """
+         # Store spectrogram lengths for Bucketing
+         # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
+         # spec_length = wav_length // hop_length
+         lengths = []
+         for audiopath, text, pitch in self.audiopaths_and_text:
+             lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
+         self.lengths = lengths
+
+     def get_audio_text_pair(self, audiopath_and_text):
+         # separate filename and text
+         audiopath, text, pitch = audiopath_and_text[0], audiopath_and_text[1], audiopath_and_text[2]
+         text = self.get_text(text)
+         spec, wav = self.get_audio(audiopath)
+         pitch = self.get_pitch(pitch)
+         return (text, spec, wav, pitch)
+
+     def get_pitch(self, pitch):
+         return torch.LongTensor(np.load(pitch))
+
+     def get_audio(self, filename):
+         audio, sampling_rate = load_wav_to_torch(filename)
+         if sampling_rate != self.sampling_rate:
+             raise ValueError("{} SR doesn't match target {} SR".format(
+                 sampling_rate, self.sampling_rate))
+         audio_norm = audio / self.max_wav_value
+         audio_norm = audio_norm.unsqueeze(0)
+         spec_filename = filename.replace(".wav", ".spec.pt")
+         if os.path.exists(spec_filename):
+             spec = torch.load(spec_filename)
+         else:
+             spec = spectrogram_torch(audio_norm, self.filter_length,
+                                      self.sampling_rate, self.hop_length, self.win_length,
+                                      center=False)
+             spec = torch.squeeze(spec, 0)
+             torch.save(spec, spec_filename)
+         return spec, audio_norm
+
+     def get_text(self, text):
+         soft = np.load(text)
+         text_norm = torch.FloatTensor(soft)
+         return text_norm
+
+     def __getitem__(self, index):
+         return self.get_audio_text_pair(self.audiopaths_and_text[index])
+
+     def __len__(self):
+         return len(self.audiopaths_and_text)
101
+
102
+ class TextAudioCollate():
103
+ """ Zero-pads model inputs and targets
104
+ """
105
+
106
+ def __init__(self, return_ids=False):
107
+ self.return_ids = return_ids
108
+
109
+ def __call__(self, batch):
110
+ """Collate's training batch from normalized text and aduio
111
+ PARAMS
112
+ ------
113
+ batch: [text_normalized, spec_normalized, wav_normalized]
114
+ """
115
+ # Right zero-pad all one-hot text sequences to max input length
116
+ _, ids_sorted_decreasing = torch.sort(
117
+ torch.LongTensor([x[1].size(1) for x in batch]),
118
+ dim=0, descending=True)
119
+
120
+ max_text_len = max([len(x[0]) for x in batch])
121
+ max_spec_len = max([x[1].size(1) for x in batch])
122
+ max_wav_len = max([x[2].size(1) for x in batch])
123
+ max_pitch_len = max([x[3].shape[0] for x in batch])
124
+ # print(batch)
125
+
126
+
127
+ text_lengths = torch.LongTensor(len(batch))
128
+ spec_lengths = torch.LongTensor(len(batch))
129
+ wav_lengths = torch.LongTensor(len(batch))
130
+
131
+ text_padded = torch.FloatTensor(len(batch), max_text_len, 256)
132
+ spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
133
+ wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
134
+ pitch_padded = torch.LongTensor(len(batch), max_pitch_len)
135
+
136
+ text_padded.zero_()
137
+ spec_padded.zero_()
138
+ wav_padded.zero_()
139
+ pitch_padded.zero_()
140
+ for i in range(len(ids_sorted_decreasing)):
141
+ row = batch[ids_sorted_decreasing[i]]
142
+
143
+ text = row[0]
144
+ text_padded[i, :text.size(0), :] = text
145
+ text_lengths[i] = text.size(0)
146
+
147
+ spec = row[1]
148
+ spec_padded[i, :, :spec.size(1)] = spec
149
+ spec_lengths[i] = spec.size(1)
150
+
151
+ wav = row[2]
152
+ wav_padded[i, :, :wav.size(1)] = wav
153
+ wav_lengths[i] = wav.size(1)
154
+
155
+ pitch = row[3]
156
+ pitch_padded[i, :pitch.size(0)] = pitch
157
+
158
+ if self.return_ids:
159
+ return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, ids_sorted_decreasing, pitch_padded
160
+ return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, pitch_padded
161
+
162
+
163
+ """Multi speaker version"""
164
+
165
+
166
+ class TextAudioSpeakerLoader(torch.utils.data.Dataset):
167
+ """
168
+ 1) loads audio, speaker_id, text pairs
169
+ 2) normalizes text and converts them to sequences of integers
170
+ 3) computes spectrograms from audio files.
171
+ """
172
+
173
+ def __init__(self, audiopaths_sid_text, hparams):
174
+ self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
175
+ self.text_cleaners = hparams.text_cleaners
176
+ self.max_wav_value = hparams.max_wav_value
177
+ self.sampling_rate = hparams.sampling_rate
178
+ self.filter_length = hparams.filter_length
179
+ self.hop_length = hparams.hop_length
180
+ self.win_length = hparams.win_length
181
+ self.sampling_rate = hparams.sampling_rate
182
+
183
+ self.cleaned_text = getattr(hparams, "cleaned_text", False)
184
+
185
+ self.add_blank = hparams.add_blank
186
+ self.min_text_len = getattr(hparams, "min_text_len", 1)
187
+ self.max_text_len = getattr(hparams, "max_text_len", 190)
188
+
189
+ random.seed(1234)
190
+ random.shuffle(self.audiopaths_sid_text)
191
+ self._filter()
192
+
193
+ def _filter(self):
194
+ """
195
+ Filter text & store spec lengths
196
+ """
197
+ # Store spectrogram lengths for Bucketing
198
+ # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
199
+ # spec_length = wav_length // hop_length
200
+
201
+ lengths = []
202
+ for audiopath, sid, text, pitch in self.audiopaths_sid_text:
203
+ lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
204
+ self.lengths = lengths
205
+
206
+ def get_audio_text_speaker_pair(self, audiopath_sid_text):
207
+ # separate filename, speaker_id and text
208
+ audiopath, sid, text, pitch = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2], audiopath_sid_text[3]
209
+ text = self.get_text(text)
210
+ spec, wav = self.get_audio(audiopath)
211
+ sid = self.get_sid(sid)
212
+ pitch = self.get_pitch(pitch)
213
+
214
+ return (text, spec, wav, pitch, sid)
215
+
216
+ def get_audio(self, filename):
217
+ audio, sampling_rate = load_wav_to_torch(filename)
218
+ if sampling_rate != self.sampling_rate:
219
+ raise ValueError("{} {} SR doesn't match target {} SR".format(
220
+ sampling_rate, self.sampling_rate))
221
+ audio_norm = audio / self.max_wav_value
222
+ audio_norm = audio_norm.unsqueeze(0)
223
+ spec_filename = filename.replace(".wav", ".spec.pt")
224
+ if os.path.exists(spec_filename):
225
+ spec = torch.load(spec_filename)
226
+ else:
227
+ spec = spectrogram_torch(audio_norm, self.filter_length,
228
+ self.sampling_rate, self.hop_length, self.win_length,
229
+ center=False)
230
+ spec = torch.squeeze(spec, 0)
231
+ torch.save(spec, spec_filename)
232
+ return spec, audio_norm
233
+
234
+ def get_text(self, text):
235
+ soft = np.load(text)
236
+ text_norm = torch.FloatTensor(soft)
237
+ return text_norm
238
+
239
+ def get_pitch(self, pitch):
240
+ return torch.LongTensor(np.load(pitch))
241
+
242
+ def get_sid(self, sid):
243
+ sid = torch.LongTensor([int(sid)])
244
+ return sid
245
+
246
+ def __getitem__(self, index):
247
+ return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
248
+
249
+ def __len__(self):
250
+ return len(self.audiopaths_sid_text)
+
+
+ class TextAudioSpeakerCollate():
+     """Zero-pads model inputs and targets"""
+
+     def __init__(self, return_ids=False):
+         self.return_ids = return_ids
+
+     def __call__(self, batch):
+         """Collates a training batch from normalized text, audio and speaker identities
+         PARAMS
+         ------
+         batch: [text_normalized, spec_normalized, wav_normalized, sid]
+         """
+         # Right zero-pad all one-hot text sequences to max input length
+         _, ids_sorted_decreasing = torch.sort(
+             torch.LongTensor([x[1].size(1) for x in batch]),
+             dim=0, descending=True)
+
+         max_text_len = max([len(x[0]) for x in batch])
+         max_spec_len = max([x[1].size(1) for x in batch])
+         max_wav_len = max([x[2].size(1) for x in batch])
+         max_pitch_len = max([x[3].shape[0] for x in batch])
+
+         text_lengths = torch.LongTensor(len(batch))
+         spec_lengths = torch.LongTensor(len(batch))
+         wav_lengths = torch.LongTensor(len(batch))
+         sid = torch.LongTensor(len(batch))
+
+         text_padded = torch.FloatTensor(len(batch), max_text_len, 256)
+         spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
+         wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
+         pitch_padded = torch.LongTensor(len(batch), max_pitch_len)
+
+         text_padded.zero_()
+         spec_padded.zero_()
+         wav_padded.zero_()
+         pitch_padded.zero_()
+
+         for i in range(len(ids_sorted_decreasing)):
+             row = batch[ids_sorted_decreasing[i]]
+
+             text = row[0]
+             text_padded[i, :text.size(0)] = text
+             text_lengths[i] = text.size(0)
+
+             spec = row[1]
+             spec_padded[i, :, :spec.size(1)] = spec
+             spec_lengths[i] = spec.size(1)
+
+             wav = row[2]
+             wav_padded[i, :, :wav.size(1)] = wav
+             wav_lengths[i] = wav.size(1)
+
+             pitch = row[3]
+             pitch_padded[i, :pitch.size(0)] = pitch
+
+             sid[i] = row[4]
+
+         if self.return_ids:
+             return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, pitch_padded, sid, ids_sorted_decreasing
+         return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, pitch_padded, sid
+
+
+ class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
+     """
+     Maintain similar input lengths in a batch.
+     Length groups are specified by boundaries.
+     Ex) boundaries = [b1, b2, b3] -> any batch includes either {x | b1 < length(x) <= b2} or {x | b2 < length(x) <= b3}.
+
+     It removes samples which are not included in the boundaries.
+     Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 is discarded.
+     """
+
+     def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):
+         super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
+         self.lengths = dataset.lengths
+         self.batch_size = batch_size
+         self.boundaries = boundaries
+
+         self.buckets, self.num_samples_per_bucket = self._create_buckets()
+         self.total_size = sum(self.num_samples_per_bucket)
+         self.num_samples = self.total_size // self.num_replicas
+
+     def _create_buckets(self):
+         buckets = [[] for _ in range(len(self.boundaries) - 1)]
+         for i in range(len(self.lengths)):
+             length = self.lengths[i]
+             idx_bucket = self._bisect(length)
+             if idx_bucket != -1:
+                 buckets[idx_bucket].append(i)
+
+         for i in range(len(buckets) - 1, 0, -1):
+             if len(buckets[i]) == 0:
+                 buckets.pop(i)
+                 self.boundaries.pop(i + 1)
+
+         num_samples_per_bucket = []
+         for i in range(len(buckets)):
+             len_bucket = len(buckets[i])
+             total_batch_size = self.num_replicas * self.batch_size
+             rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size
+             num_samples_per_bucket.append(len_bucket + rem)
+         return buckets, num_samples_per_bucket
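+     # Example: with num_replicas = 2 and batch_size = 16, a bucket of 100 clips
+     # has total_batch_size = 32 and rem = (32 - 100 % 32) % 32 = 28, so it is
+     # padded up to 128 samples and splits evenly across both replicas.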
+
+     def __iter__(self):
+         # deterministically shuffle based on epoch
+         g = torch.Generator()
+         g.manual_seed(self.epoch)
+
+         indices = []
+         if self.shuffle:
+             for bucket in self.buckets:
+                 indices.append(torch.randperm(len(bucket), generator=g).tolist())
+         else:
+             for bucket in self.buckets:
+                 indices.append(list(range(len(bucket))))
+
+         batches = []
+         for i in range(len(self.buckets)):
+             bucket = self.buckets[i]
+             len_bucket = len(bucket)
+             ids_bucket = indices[i]
+             num_samples_bucket = self.num_samples_per_bucket[i]
+
+             # add extra samples to make it evenly divisible
+             rem = num_samples_bucket - len_bucket
+             ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]
+
+             # subsample
+             ids_bucket = ids_bucket[self.rank::self.num_replicas]
+
+             # batching
+             for j in range(len(ids_bucket) // self.batch_size):
+                 batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]]
+                 batches.append(batch)
+
+         if self.shuffle:
+             batch_ids = torch.randperm(len(batches), generator=g).tolist()
+             batches = [batches[i] for i in batch_ids]
+         self.batches = batches
+
+         assert len(self.batches) * self.batch_size == self.num_samples
+         return iter(self.batches)
+
+     def _bisect(self, x, lo=0, hi=None):
+         if hi is None:
+             hi = len(self.boundaries) - 1
+
+         if hi > lo:
+             mid = (hi + lo) // 2
+             if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
+                 return mid
+             elif x <= self.boundaries[mid]:
+                 return self._bisect(x, lo, mid)
+             else:
+                 return self._bisect(x, mid + 1, hi)
+         else:
+             return -1
+
+     def __len__(self):
+         return self.num_samples // self.batch_size
losses.py ADDED
@@ -0,0 +1,61 @@
+ import torch
+ from torch.nn import functional as F
+
+ import commons
+
+
+ def feature_loss(fmap_r, fmap_g):
+     loss = 0
+     for dr, dg in zip(fmap_r, fmap_g):
+         for rl, gl in zip(dr, dg):
+             rl = rl.float().detach()
+             gl = gl.float()
+             loss += torch.mean(torch.abs(rl - gl))
+
+     return loss * 2
+
+
+ def discriminator_loss(disc_real_outputs, disc_generated_outputs):
+     loss = 0
+     r_losses = []
+     g_losses = []
+     for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
+         dr = dr.float()
+         dg = dg.float()
+         r_loss = torch.mean((1 - dr)**2)
+         g_loss = torch.mean(dg**2)
+         loss += (r_loss + g_loss)
+         r_losses.append(r_loss.item())
+         g_losses.append(g_loss.item())
+
+     return loss, r_losses, g_losses
+
+
+ def generator_loss(disc_outputs):
+     loss = 0
+     gen_losses = []
+     for dg in disc_outputs:
+         dg = dg.float()
+         l = torch.mean((1 - dg)**2)
+         gen_losses.append(l)
+         loss += l
+
+     return loss, gen_losses
+
+
+ def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
+     """
+     z_p, logs_q: [b, h, t_t]
+     m_p, logs_p: [b, h, t_t]
+     """
+     z_p = z_p.float()
+     logs_q = logs_q.float()
+     m_p = m_p.float()
+     logs_p = logs_p.float()
+     z_mask = z_mask.float()
+
+     kl = logs_p - logs_q - 0.5
+     kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)
+     kl = torch.sum(kl * z_mask)
+     l = kl / torch.sum(z_mask)
+     return l
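+ # This is the VITS prior-matching term: per valid frame it evaluates
+ # logs_p - logs_q - 0.5 + 0.5 * (z_p - m_p)^2 / exp(2 * logs_p), the log-density
+ # gap between the flow-mapped posterior sample z_p and the prior N(m_p,
+ # exp(logs_p)), masked by z_mask and normalized by the number of valid frames.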
mel_processing.py ADDED
@@ -0,0 +1,112 @@
+ import math
+ import os
+ import random
+ import torch
+ from torch import nn
+ import torch.nn.functional as F
+ import torch.utils.data
+ import numpy as np
+ import librosa
+ import librosa.util as librosa_util
+ from librosa.util import normalize, pad_center, tiny
+ from scipy.signal import get_window
+ from scipy.io.wavfile import read
+ from librosa.filters import mel as librosa_mel_fn
+
+ MAX_WAV_VALUE = 32768.0
+
+
+ def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
+     """
+     PARAMS
+     ------
+     C: compression factor
+     """
+     return torch.log(torch.clamp(x, min=clip_val) * C)
+
+
+ def dynamic_range_decompression_torch(x, C=1):
+     """
+     PARAMS
+     ------
+     C: compression factor used to compress
+     """
+     return torch.exp(x) / C
+
+
+ def spectral_normalize_torch(magnitudes):
+     output = dynamic_range_compression_torch(magnitudes)
+     return output
+
+
+ def spectral_de_normalize_torch(magnitudes):
+     output = dynamic_range_decompression_torch(magnitudes)
+     return output
+
+
+ # Windows and mel filterbanks are cached per (size, dtype, device) key.
+ mel_basis = {}
+ hann_window = {}
+
+
+ def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
+     if torch.min(y) < -1.:
+         print('min value is ', torch.min(y))
+     if torch.max(y) > 1.:
+         print('max value is ', torch.max(y))
+
+     global hann_window
+     dtype_device = str(y.dtype) + '_' + str(y.device)
+     wnsize_dtype_device = str(win_size) + '_' + dtype_device
+     if wnsize_dtype_device not in hann_window:
+         hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
+
+     y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode='reflect')
+     y = y.squeeze(1)
+
+     spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
+                       center=center, pad_mode='reflect', normalized=False, onesided=True)
+
+     spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
+     return spec
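+ # Shape check with this repo's config (n_fft = win_size = 1024, hop = 256):
+ # one second of 22050 Hz audio is reflect-padded by (1024 - 256) / 2 = 384
+ # samples per side, giving 1 + (22818 - 1024) // 256 = 86 frames, so spec is
+ # [1, 513, 86]. Note the pre-1.8 torch.stft call: recent PyTorch versions need
+ # return_complex=True plus torch.view_as_real to keep the real/imag axis.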
+
+
+ def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
+     global mel_basis
+     dtype_device = str(spec.dtype) + '_' + str(spec.device)
+     fmax_dtype_device = str(fmax) + '_' + dtype_device
+     if fmax_dtype_device not in mel_basis:
+         # positional librosa.filters.mel call; librosa >= 0.10 needs keyword arguments
+         mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
+         mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
+     spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
+     spec = spectral_normalize_torch(spec)
+     return spec
+
+
+ def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
+     if torch.min(y) < -1.:
+         print('min value is ', torch.min(y))
+     if torch.max(y) > 1.:
+         print('max value is ', torch.max(y))
+
+     global mel_basis, hann_window
+     dtype_device = str(y.dtype) + '_' + str(y.device)
+     fmax_dtype_device = str(fmax) + '_' + dtype_device
+     wnsize_dtype_device = str(win_size) + '_' + dtype_device
+     if fmax_dtype_device not in mel_basis:
+         mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
+         mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
+     if wnsize_dtype_device not in hann_window:
+         hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
+
+     y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode='reflect')
+     y = y.squeeze(1)
+
+     spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
+                       center=center, pad_mode='reflect', normalized=False, onesided=True)
+
+     spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
+
+     spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
+     spec = spectral_normalize_torch(spec)
+
+     return spec
models.py ADDED
@@ -0,0 +1,625 @@
+ import copy
+ import math
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+ import numpy as np
+ import commons
+ import modules
+ import attentions
+ import monotonic_align
+
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+ from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+ from commons import init_weights, get_padding
+
+
+ class StochasticDurationPredictor(nn.Module):
+     def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
+         super().__init__()
+         filter_channels = in_channels  # this line should be removed in a future version
+         self.in_channels = in_channels
+         self.filter_channels = filter_channels
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.n_flows = n_flows
+         self.gin_channels = gin_channels
+
+         self.log_flow = modules.Log()
+         self.flows = nn.ModuleList()
+         self.flows.append(modules.ElementwiseAffine(2))
+         for i in range(n_flows):
+             self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
+             self.flows.append(modules.Flip())
+
+         self.post_pre = nn.Conv1d(1, filter_channels, 1)
+         self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
+         self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
+         self.post_flows = nn.ModuleList()
+         self.post_flows.append(modules.ElementwiseAffine(2))
+         for i in range(4):
+             self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
+             self.post_flows.append(modules.Flip())
+
+         self.pre = nn.Conv1d(in_channels, filter_channels, 1)
+         self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
+         self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
+         if gin_channels != 0:
+             self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
+
+     def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
+         x = torch.detach(x)
+         x = self.pre(x)
+         if g is not None:
+             g = torch.detach(g)
+             x = x + self.cond(g)
+         x = self.convs(x, x_mask)
+         x = self.proj(x) * x_mask
+
+         if not reverse:
+             flows = self.flows
+             assert w is not None
+
+             logdet_tot_q = 0
+             h_w = self.post_pre(w)
+             h_w = self.post_convs(h_w, x_mask)
+             h_w = self.post_proj(h_w) * x_mask
+             e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
+             z_q = e_q
+             for flow in self.post_flows:
+                 z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
+                 logdet_tot_q += logdet_q
+             z_u, z1 = torch.split(z_q, [1, 1], 1)
+             u = torch.sigmoid(z_u) * x_mask
+             z0 = (w - u) * x_mask
+             logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2])
+             logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q
+
+             logdet_tot = 0
+             z0, logdet = self.log_flow(z0, x_mask)
+             logdet_tot += logdet
+             z = torch.cat([z0, z1], 1)
+             for flow in flows:
+                 z, logdet = flow(z, x_mask, g=x, reverse=reverse)
+                 logdet_tot = logdet_tot + logdet
+             nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot
+             return nll + logq  # [b]
+         else:
+             flows = list(reversed(self.flows))
+             flows = flows[:-2] + [flows[-1]]  # remove a useless vflow
+             z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
+             for flow in flows:
+                 z = flow(z, x_mask, g=x, reverse=reverse)
+             z0, z1 = torch.split(z, [1, 1], 1)
+             logw = z0
+             return logw
+
+
+ class DurationPredictor(nn.Module):
+     def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
+         super().__init__()
+
+         self.in_channels = in_channels
+         self.filter_channels = filter_channels
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.gin_channels = gin_channels
+
+         self.drop = nn.Dropout(p_dropout)
+         self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
+         self.norm_1 = modules.LayerNorm(filter_channels)
+         self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
+         self.norm_2 = modules.LayerNorm(filter_channels)
+         self.proj = nn.Conv1d(filter_channels, 1, 1)
+
+         if gin_channels != 0:
+             self.cond = nn.Conv1d(gin_channels, in_channels, 1)
+
+     def forward(self, x, x_mask, g=None):
+         x = torch.detach(x)
+         if g is not None:
+             g = torch.detach(g)
+             x = x + self.cond(g)
+         x = self.conv_1(x * x_mask)
+         x = torch.relu(x)
+         x = self.norm_1(x)
+         x = self.drop(x)
+         x = self.conv_2(x * x_mask)
+         x = torch.relu(x)
+         x = self.norm_2(x)
+         x = self.drop(x)
+         x = self.proj(x * x_mask)
+         return x * x_mask
+
+
+ class PitchPredictor(nn.Module):
+     def __init__(self,
+                  n_vocab,
+                  out_channels,
+                  hidden_channels,
+                  filter_channels,
+                  n_heads,
+                  n_layers,
+                  kernel_size,
+                  p_dropout):
+         super().__init__()
+         self.n_vocab = n_vocab  # number of phonemes; differs between Chinese and English
+         self.out_channels = out_channels
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+
+         self.pitch_net = attentions.Encoder(
+             hidden_channels,
+             filter_channels,
+             n_heads,
+             n_layers,
+             kernel_size,
+             p_dropout)
+         self.proj = nn.Conv1d(hidden_channels, 1, 1)
+
+     def forward(self, x, x_mask):
+         pitch_embedding = self.pitch_net(x * x_mask, x_mask)
+         pitch_embedding = pitch_embedding * x_mask
+         pred_pitch = self.proj(pitch_embedding)
+         return pred_pitch, pitch_embedding
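+ # PitchPredictor is not part of stock VITS; it appears to be this repo's
+ # addition for singing voice conversion, regressing a 1-d pitch track from the
+ # hidden sequence with a small attention encoder.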
+
+
+ class TextEncoder(nn.Module):
+     def __init__(self,
+                  n_vocab,
+                  out_channels,
+                  hidden_channels,
+                  filter_channels,
+                  n_heads,
+                  n_layers,
+                  kernel_size,
+                  p_dropout):
+         super().__init__()
+         self.n_vocab = n_vocab
+         self.out_channels = out_channels
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+
+         # self.emb = nn.Embedding(n_vocab, hidden_channels)
+         # nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
+         self.emb_pitch = nn.Embedding(256, hidden_channels)
+         nn.init.normal_(self.emb_pitch.weight, 0.0, hidden_channels ** -0.5)
+
+         self.encoder = attentions.Encoder(
+             hidden_channels,
+             filter_channels,
+             n_heads,
+             n_layers,
+             kernel_size,
+             p_dropout)
+         self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+     def forward(self, x, x_lengths, pitch):
+         # x = x.transpose(1, 2)
+         # x = self.emb(x) * math.sqrt(self.hidden_channels)  # [b, t, h]
+         # print(x.shape)
+         x = x + self.emb_pitch(pitch)
+         x = torch.transpose(x, 1, -1)  # [b, h, t]
+         x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+
+         x = self.encoder(x * x_mask, x_mask)
+         stats = self.proj(x) * x_mask
+
+         m, logs = torch.split(stats, self.out_channels, dim=1)
+         return x, m, logs, x_mask
+
+
+ class ResidualCouplingBlock(nn.Module):
+     def __init__(self,
+                  channels,
+                  hidden_channels,
+                  kernel_size,
+                  dilation_rate,
+                  n_layers,
+                  n_flows=4,
+                  gin_channels=0):
+         super().__init__()
+         self.channels = channels
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.dilation_rate = dilation_rate
+         self.n_layers = n_layers
+         self.n_flows = n_flows
+         self.gin_channels = gin_channels
+
+         self.flows = nn.ModuleList()
+         for i in range(n_flows):
+             self.flows.append(
+                 modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers,
+                                               gin_channels=gin_channels, mean_only=True))
+             self.flows.append(modules.Flip())
+
+     def forward(self, x, x_mask, g=None, reverse=False):
+         if not reverse:
+             for flow in self.flows:
+                 x, _ = flow(x, x_mask, g=g, reverse=reverse)
+         else:
+             for flow in reversed(self.flows):
+                 x = flow(x, x_mask, g=g, reverse=reverse)
+         return x
+
+
+ class PosteriorEncoder(nn.Module):
+     def __init__(self,
+                  in_channels,
+                  out_channels,
+                  hidden_channels,
+                  kernel_size,
+                  dilation_rate,
+                  n_layers,
+                  gin_channels=0):
+         super().__init__()
+         self.in_channels = in_channels
+         self.out_channels = out_channels
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.dilation_rate = dilation_rate
+         self.n_layers = n_layers
+         self.gin_channels = gin_channels
+
+         self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+         self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
+         self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+     def forward(self, x, x_lengths, g=None):
+         x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+         x = self.pre(x) * x_mask
+         x = self.enc(x, x_mask, g=g)
+         stats = self.proj(x) * x_mask
+         m, logs = torch.split(stats, self.out_channels, dim=1)
+         z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+         return z, m, logs, x_mask
+
+
+ class Generator(torch.nn.Module):
+     def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
+                  upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
+         super(Generator, self).__init__()
+         self.num_kernels = len(resblock_kernel_sizes)
+         self.num_upsamples = len(upsample_rates)
+         self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
+         resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
+
+         self.ups = nn.ModuleList()
+         for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+             self.ups.append(weight_norm(
+                 ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)),
300
+ k, u, padding=(k - u) // 2)))
301
+
302
+ self.resblocks = nn.ModuleList()
303
+ for i in range(len(self.ups)):
304
+ ch = upsample_initial_channel // (2 ** (i + 1))
305
+ for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
306
+ self.resblocks.append(resblock(ch, k, d))
307
+
308
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
309
+ self.ups.apply(init_weights)
310
+
311
+ if gin_channels != 0:
312
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
313
+
314
+ def forward(self, x, g=None):
315
+ x = self.conv_pre(x)
316
+ if g is not None:
317
+ x = x + self.cond(g)
318
+
319
+ for i in range(self.num_upsamples):
320
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
321
+ x = self.ups[i](x)
322
+ xs = None
323
+ for j in range(self.num_kernels):
324
+ if xs is None:
325
+ xs = self.resblocks[i * self.num_kernels + j](x)
326
+ else:
327
+ xs += self.resblocks[i * self.num_kernels + j](x)
328
+ x = xs / self.num_kernels
329
+ x = F.leaky_relu(x)
330
+ x = self.conv_post(x)
331
+ x = torch.tanh(x)
332
+
333
+ return x
334
+
335
+ def remove_weight_norm(self):
336
+ print('Removing weight norm...')
337
+ for l in self.ups:
338
+ remove_weight_norm(l)
339
+ for l in self.resblocks:
340
+ l.remove_weight_norm()
341
+
342
+
343
+ class DiscriminatorP(torch.nn.Module):
344
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
345
+ super(DiscriminatorP, self).__init__()
346
+ self.period = period
347
+ self.use_spectral_norm = use_spectral_norm
348
+ norm_f = weight_norm if use_spectral_norm == False else spectral_norm
349
+ self.convs = nn.ModuleList([
350
+ norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
351
+ norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
352
+ norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
353
+ norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
354
+ norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
355
+ ])
356
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
357
+
358
+ def forward(self, x):
359
+ fmap = []
360
+
361
+ # 1d to 2d
362
+ b, c, t = x.shape
363
+ if t % self.period != 0: # pad first
364
+ n_pad = self.period - (t % self.period)
365
+ x = F.pad(x, (0, n_pad), "reflect")
366
+ t = t + n_pad
367
+ x = x.view(b, c, t // self.period, self.period)
368
+
369
+ for l in self.convs:
370
+ x = l(x)
371
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
372
+ fmap.append(x)
373
+ x = self.conv_post(x)
374
+ fmap.append(x)
375
+ x = torch.flatten(x, 1, -1)
376
+
377
+ return x, fmap
378
+
379
+
380
+ class DiscriminatorS(torch.nn.Module):
381
+ def __init__(self, use_spectral_norm=False):
382
+ super(DiscriminatorS, self).__init__()
383
+ norm_f = weight_norm if use_spectral_norm == False else spectral_norm
384
+ self.convs = nn.ModuleList([
385
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
386
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
387
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
388
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
389
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
390
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
391
+ ])
392
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
393
+
394
+ def forward(self, x):
395
+ fmap = []
396
+
397
+ for l in self.convs:
398
+ x = l(x)
399
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
400
+ fmap.append(x)
401
+ x = self.conv_post(x)
402
+ fmap.append(x)
403
+ x = torch.flatten(x, 1, -1)
404
+
405
+ return x, fmap
406
+
407
+
408
+ class MultiPeriodDiscriminator(torch.nn.Module):
409
+ def __init__(self, use_spectral_norm=False):
410
+ super(MultiPeriodDiscriminator, self).__init__()
411
+ periods = [2, 3, 5, 7, 11]
412
+
413
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
414
+ discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
415
+ self.discriminators = nn.ModuleList(discs)
416
+
417
+ def forward(self, y, y_hat):
418
+ y_d_rs = []
419
+ y_d_gs = []
420
+ fmap_rs = []
421
+ fmap_gs = []
422
+ for i, d in enumerate(self.discriminators):
423
+ y_d_r, fmap_r = d(y)
424
+ y_d_g, fmap_g = d(y_hat)
425
+ y_d_rs.append(y_d_r)
426
+ y_d_gs.append(y_d_g)
427
+ fmap_rs.append(fmap_r)
428
+ fmap_gs.append(fmap_g)
429
+
430
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
431
+
432
+
433
+ class SynthesizerTrn(nn.Module):
434
+ """
435
+ Synthesizer for Training
436
+ """
437
+
438
+ def __init__(self,
439
+ n_vocab,
440
+ spec_channels,
441
+ segment_size,
442
+ inter_channels,
443
+ hidden_channels,
444
+ filter_channels,
445
+ n_heads,
446
+ n_layers,
447
+ kernel_size,
448
+ p_dropout,
449
+ resblock,
450
+ resblock_kernel_sizes,
451
+ resblock_dilation_sizes,
452
+ upsample_rates,
453
+ upsample_initial_channel,
454
+ upsample_kernel_sizes,
455
+ n_speakers=0,
456
+ gin_channels=0,
457
+ use_sdp=True,
458
+ **kwargs):
459
+
460
+ super().__init__()
461
+ self.n_vocab = n_vocab
462
+ self.spec_channels = spec_channels
463
+ self.inter_channels = inter_channels
464
+ self.hidden_channels = hidden_channels
465
+ self.filter_channels = filter_channels
466
+ self.n_heads = n_heads
467
+ self.n_layers = n_layers
468
+ self.kernel_size = kernel_size
469
+ self.p_dropout = p_dropout
470
+ self.resblock = resblock
471
+ self.resblock_kernel_sizes = resblock_kernel_sizes
472
+ self.resblock_dilation_sizes = resblock_dilation_sizes
473
+ self.upsample_rates = upsample_rates
474
+ self.upsample_initial_channel = upsample_initial_channel
475
+ self.upsample_kernel_sizes = upsample_kernel_sizes
476
+ self.segment_size = segment_size
477
+ self.n_speakers = n_speakers
478
+ self.gin_channels = gin_channels
479
+
480
+ self.use_sdp = use_sdp
481
+
482
+ self.enc_p = TextEncoder(n_vocab,
483
+ inter_channels,
484
+ hidden_channels,
485
+ filter_channels,
486
+ n_heads,
487
+ n_layers,
488
+ kernel_size,
489
+ p_dropout)
490
+ self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
491
+ upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
492
+ self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16,
493
+ gin_channels=gin_channels)
494
+ self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
495
+ self.pitch_net = PitchPredictor(n_vocab, inter_channels, hidden_channels, filter_channels, n_heads, n_layers,
496
+ kernel_size, p_dropout)
497
+
498
+ if use_sdp:
499
+ self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
500
+ else:
501
+ self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
502
+
503
+ if n_speakers > 1:
504
+ self.emb_g = nn.Embedding(n_speakers, gin_channels)
505
+
506
+ def forward(self, x, x_lengths, y, y_lengths, pitch, sid=None):
507
+
508
+ x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, pitch)
509
+ # print(f"x: {x.shape}")
510
+ pred_pitch, pitch_embedding = self.pitch_net(x, x_mask)
511
+ # print(f"pred_pitch: {pred_pitch.shape}")
512
+ # print(f"pitch_embedding: {pitch_embedding.shape}")
513
+ x = x + pitch_embedding
514
+ lf0 = torch.unsqueeze(pred_pitch, -1)
515
+ gt_lf0 = torch.log(440 * (2 ** ((pitch.float() - 69) / 12)))
516
+ gt_lf0 = gt_lf0.to(x.device)
517
+ x_mask_sum = torch.sum(x_mask)
518
+ lf0 = lf0.squeeze()
519
+ l_pitch = torch.sum((gt_lf0 - lf0) ** 2, 1) / x_mask_sum
520
+
521
+ if self.n_speakers > 0:
522
+ g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
523
+ else:
524
+ g = None
525
+
526
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
527
+ # print(f"z: {z.shape}")
528
+
529
+ z_p = self.flow(z, y_mask, g=g)
530
+ # print(f"z_p: {z_p.shape}")
531
+
532
+ with torch.no_grad():
533
+ # negative cross-entropy
534
+ s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
535
+ neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
536
+ neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2),
537
+ s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
538
+ neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
539
+ neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
540
+ neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
541
+
542
+ attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
543
+ attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
544
+
545
+ w = attn.sum(2)
546
+ if self.use_sdp:
547
+ l_length = self.dp(x, x_mask, w, g=g)
548
+ l_length = l_length / torch.sum(x_mask)
549
+ else:
550
+ logw_ = torch.log(w + 1e-6) * x_mask
551
+ logw = self.dp(x, x_mask, g=g)
552
+ l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging
553
+
554
+ # expand prior
555
+ # print()
556
+ # print(f"attn: {attn.shape}")
557
+ # print(f"m_p: {m_p.shape}")
558
+ # print(f"logs_p: {logs_p.shape}")
559
+
560
+ m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
561
+ logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
562
+ # print(f"m_p: {m_p.shape}")
563
+ # print(f"logs_p: {logs_p.shape}")
564
+
565
+ z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
566
+ # print(f"z_slice: {z_slice.shape}")
567
+
568
+ o = self.dec(z_slice, g=g)
569
+ return o, l_length, l_pitch, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
570
+
571
+ def infer(self, x, x_lengths, pitch, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
572
+ x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, pitch)
573
+ pred_pitch, pitch_embedding = self.pitch_net(x, x_mask)
574
+ x = x + pitch_embedding
575
+ if self.n_speakers > 0:
576
+ g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
577
+ else:
578
+ g = None
579
+
580
+ if self.use_sdp:
581
+ logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
582
+ else:
583
+ logw = self.dp(x, x_mask, g=g)
584
+ w = torch.exp(logw) * x_mask * length_scale
585
+ w_ceil = torch.ceil(w)
586
+
587
+ w_ceil = w_ceil * 0 + 2
588
+ # for index in range(w_ceil.shape[2]):
589
+ # if index%4 == 0:
590
+ # w_ceil[0,0,index] = 1.0
591
+
592
+ for i in range(w_ceil.shape[2]):
593
+ sep = 1 / 0.14
594
+ if i * sep >= w_ceil.shape[2] * 2:
595
+ break
596
+ w_ceil[0, 0, int(i * sep / 2)] = 1
597
+
598
+ # print(w_ceil)
599
+ y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
600
+ y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
601
+
602
+ attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
603
+
604
+ attn = commons.generate_path(w_ceil, attn_mask)
605
+
606
+ m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
607
+ logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1,
608
+ 2) # [b, t', t], [b, t, d] -> [b, d, t']
609
+
610
+ z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
611
+
612
+ z = self.flow(z_p, y_mask, g=g, reverse=True)
613
+ o = self.dec((z * y_mask)[:, :, :max_len], g=g)
614
+ return o, attn, y_mask, (z, z_p, m_p, logs_p)
615
+
616
+ def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
617
+ assert self.n_speakers > 0, "n_speakers have to be larger than 0."
618
+ g_src = self.emb_g(sid_src).unsqueeze(-1)
619
+ g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
620
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
621
+ z_p = self.flow(z, y_mask, g=g_src)
622
+ z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
623
+ o_hat = self.dec(z_hat * y_mask, g=g_tgt)
624
+ return o_hat, y_mask, (z, z_p, z_hat)
625
+
modules.py ADDED
@@ -0,0 +1,390 @@
+ import copy
+ import math
+ import numpy as np
+ import scipy
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+ from torch.nn.utils import weight_norm, remove_weight_norm
+
+ import commons
+ from commons import init_weights, get_padding
+ from transforms import piecewise_rational_quadratic_transform
+
+
+ LRELU_SLOPE = 0.1
+
+
+ class LayerNorm(nn.Module):
+     def __init__(self, channels, eps=1e-5):
+         super().__init__()
+         self.channels = channels
+         self.eps = eps
+
+         self.gamma = nn.Parameter(torch.ones(channels))
+         self.beta = nn.Parameter(torch.zeros(channels))
+
+     def forward(self, x):
+         x = x.transpose(1, -1)
+         x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+         return x.transpose(1, -1)
+
+
+ class ConvReluNorm(nn.Module):
+     def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
+         super().__init__()
+         self.in_channels = in_channels
+         self.hidden_channels = hidden_channels
+         self.out_channels = out_channels
+         self.kernel_size = kernel_size
+         self.n_layers = n_layers
+         self.p_dropout = p_dropout
+         assert n_layers > 1, "Number of layers should be larger than 1."
+
+         self.conv_layers = nn.ModuleList()
+         self.norm_layers = nn.ModuleList()
+         self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
+         self.norm_layers.append(LayerNorm(hidden_channels))
+         self.relu_drop = nn.Sequential(
+             nn.ReLU(),
+             nn.Dropout(p_dropout))
+         for _ in range(n_layers - 1):
+             self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
+             self.norm_layers.append(LayerNorm(hidden_channels))
+         self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
+         self.proj.weight.data.zero_()
+         self.proj.bias.data.zero_()
+
+     def forward(self, x, x_mask):
+         x_org = x
+         for i in range(self.n_layers):
+             x = self.conv_layers[i](x * x_mask)
+             x = self.norm_layers[i](x)
+             x = self.relu_drop(x)
+         x = x_org + self.proj(x)
+         return x * x_mask
+
+
+ class DDSConv(nn.Module):
+     """
+     Dilated and Depth-Separable Convolution
+     """
+     def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
+         super().__init__()
+         self.channels = channels
+         self.kernel_size = kernel_size
+         self.n_layers = n_layers
+         self.p_dropout = p_dropout
+
+         self.drop = nn.Dropout(p_dropout)
+         self.convs_sep = nn.ModuleList()
+         self.convs_1x1 = nn.ModuleList()
+         self.norms_1 = nn.ModuleList()
+         self.norms_2 = nn.ModuleList()
+         for i in range(n_layers):
+             dilation = kernel_size ** i
+             padding = (kernel_size * dilation - dilation) // 2
+             self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
+                                             groups=channels, dilation=dilation, padding=padding))
+             self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
+             self.norms_1.append(LayerNorm(channels))
+             self.norms_2.append(LayerNorm(channels))
+
+     def forward(self, x, x_mask, g=None):
+         if g is not None:
+             x = x + g
+         for i in range(self.n_layers):
+             y = self.convs_sep[i](x * x_mask)
+             y = self.norms_1[i](y)
+             y = F.gelu(y)
+             y = self.convs_1x1[i](y)
+             y = self.norms_2[i](y)
+             y = F.gelu(y)
+             y = self.drop(y)
+             x = x + y
+         return x * x_mask
+
+
+ class WN(torch.nn.Module):
+     def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
+         super(WN, self).__init__()
+         assert kernel_size % 2 == 1
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.dilation_rate = dilation_rate
+         self.n_layers = n_layers
+         self.gin_channels = gin_channels
+         self.p_dropout = p_dropout
+
+         self.in_layers = torch.nn.ModuleList()
+         self.res_skip_layers = torch.nn.ModuleList()
+         self.drop = nn.Dropout(p_dropout)
+
+         if gin_channels != 0:
+             cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
+             self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
+
+         for i in range(n_layers):
+             dilation = dilation_rate ** i
+             padding = int((kernel_size * dilation - dilation) / 2)
+             in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size,
+                                        dilation=dilation, padding=padding)
+             in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
+             self.in_layers.append(in_layer)
+
+             # last one is not necessary
+             if i < n_layers - 1:
+                 res_skip_channels = 2 * hidden_channels
+             else:
+                 res_skip_channels = hidden_channels
+
+             res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
+             res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
+             self.res_skip_layers.append(res_skip_layer)
+
+     def forward(self, x, x_mask, g=None, **kwargs):
+         output = torch.zeros_like(x)
+         n_channels_tensor = torch.IntTensor([self.hidden_channels])
+
+         if g is not None:
+             g = self.cond_layer(g)
+
+         for i in range(self.n_layers):
+             x_in = self.in_layers[i](x)
+             if g is not None:
+                 cond_offset = i * 2 * self.hidden_channels
+                 g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :]
+             else:
+                 g_l = torch.zeros_like(x_in)
+
+             acts = commons.fused_add_tanh_sigmoid_multiply(
+                 x_in,
+                 g_l,
+                 n_channels_tensor)
+             acts = self.drop(acts)
+
+             res_skip_acts = self.res_skip_layers[i](acts)
+             if i < self.n_layers - 1:
+                 res_acts = res_skip_acts[:, :self.hidden_channels, :]
+                 x = (x + res_acts) * x_mask
+                 output = output + res_skip_acts[:, self.hidden_channels:, :]
+             else:
+                 output = output + res_skip_acts
+         return output * x_mask
+
+     def remove_weight_norm(self):
+         if self.gin_channels != 0:
+             torch.nn.utils.remove_weight_norm(self.cond_layer)
+         for l in self.in_layers:
+             torch.nn.utils.remove_weight_norm(l)
+         for l in self.res_skip_layers:
+             torch.nn.utils.remove_weight_norm(l)
+
+
+ class ResBlock1(torch.nn.Module):
+     def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
+         super(ResBlock1, self).__init__()
+         self.convs1 = nn.ModuleList([
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+                                padding=get_padding(kernel_size, dilation[0]))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+                                padding=get_padding(kernel_size, dilation[1]))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
+                                padding=get_padding(kernel_size, dilation[2])))
+         ])
+         self.convs1.apply(init_weights)
+
+         self.convs2 = nn.ModuleList([
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+                                padding=get_padding(kernel_size, 1))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+                                padding=get_padding(kernel_size, 1))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+                                padding=get_padding(kernel_size, 1)))
+         ])
+         self.convs2.apply(init_weights)
+
+     def forward(self, x, x_mask=None):
+         for c1, c2 in zip(self.convs1, self.convs2):
+             xt = F.leaky_relu(x, LRELU_SLOPE)
+             if x_mask is not None:
+                 xt = xt * x_mask
+             xt = c1(xt)
+             xt = F.leaky_relu(xt, LRELU_SLOPE)
+             if x_mask is not None:
+                 xt = xt * x_mask
+             xt = c2(xt)
+             x = xt + x
+         if x_mask is not None:
+             x = x * x_mask
+         return x
+
+     def remove_weight_norm(self):
+         for l in self.convs1:
+             remove_weight_norm(l)
+         for l in self.convs2:
+             remove_weight_norm(l)
+
+
+ class ResBlock2(torch.nn.Module):
+     def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
+         super(ResBlock2, self).__init__()
+         self.convs = nn.ModuleList([
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+                                padding=get_padding(kernel_size, dilation[0]))),
+             weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+                                padding=get_padding(kernel_size, dilation[1])))
+         ])
+         self.convs.apply(init_weights)
+
+     def forward(self, x, x_mask=None):
+         for c in self.convs:
+             xt = F.leaky_relu(x, LRELU_SLOPE)
+             if x_mask is not None:
+                 xt = xt * x_mask
+             xt = c(xt)
+             x = xt + x
+         if x_mask is not None:
+             x = x * x_mask
+         return x
+
+     def remove_weight_norm(self):
+         for l in self.convs:
+             remove_weight_norm(l)
+
+
+ class Log(nn.Module):
+     def forward(self, x, x_mask, reverse=False, **kwargs):
+         if not reverse:
+             y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
+             logdet = torch.sum(-y, [1, 2])
+             return y, logdet
+         else:
+             x = torch.exp(x) * x_mask
+             return x
+
+
+ class Flip(nn.Module):
+     def forward(self, x, *args, reverse=False, **kwargs):
+         x = torch.flip(x, [1])
+         if not reverse:
+             logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
+             return x, logdet
+         else:
+             return x
+
+
+ class ElementwiseAffine(nn.Module):
+     def __init__(self, channels):
+         super().__init__()
+         self.channels = channels
+         self.m = nn.Parameter(torch.zeros(channels, 1))
+         self.logs = nn.Parameter(torch.zeros(channels, 1))
+
+     def forward(self, x, x_mask, reverse=False, **kwargs):
+         if not reverse:
+             y = self.m + torch.exp(self.logs) * x
+             y = y * x_mask
+             logdet = torch.sum(self.logs * x_mask, [1, 2])
+             return y, logdet
+         else:
+             x = (x - self.m) * torch.exp(-self.logs) * x_mask
+             return x
+
+
+ class ResidualCouplingLayer(nn.Module):
+     def __init__(self,
+                  channels,
+                  hidden_channels,
+                  kernel_size,
+                  dilation_rate,
+                  n_layers,
+                  p_dropout=0,
+                  gin_channels=0,
+                  mean_only=False):
+         assert channels % 2 == 0, "channels should be divisible by 2"
+         super().__init__()
+         self.channels = channels
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.dilation_rate = dilation_rate
+         self.n_layers = n_layers
+         self.half_channels = channels // 2
+         self.mean_only = mean_only
+
+         self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
+         self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
+         self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
+         self.post.weight.data.zero_()
+         self.post.bias.data.zero_()
+
+     def forward(self, x, x_mask, g=None, reverse=False):
+         x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+         h = self.pre(x0) * x_mask
+         h = self.enc(h, x_mask, g=g)
+         stats = self.post(h) * x_mask
+         if not self.mean_only:
+             m, logs = torch.split(stats, [self.half_channels] * 2, 1)
+         else:
+             m = stats
+             logs = torch.zeros_like(m)
+
+         if not reverse:
+             x1 = m + x1 * torch.exp(logs) * x_mask
+             x = torch.cat([x0, x1], 1)
+             logdet = torch.sum(logs, [1, 2])
+             return x, logdet
+         else:
+             x1 = (x1 - m) * torch.exp(-logs) * x_mask
+             x = torch.cat([x0, x1], 1)
+             return x
+
+
+ class ConvFlow(nn.Module):
+     def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
+         super().__init__()
+         self.in_channels = in_channels
+         self.filter_channels = filter_channels
+         self.kernel_size = kernel_size
+         self.n_layers = n_layers
+         self.num_bins = num_bins
+         self.tail_bound = tail_bound
+         self.half_channels = in_channels // 2
+
+         self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
+         self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
+         self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
+         self.proj.weight.data.zero_()
+         self.proj.bias.data.zero_()
+
+     def forward(self, x, x_mask, g=None, reverse=False):
+         x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+         h = self.pre(x0)
+         h = self.convs(h, x_mask, g=g)
+         h = self.proj(h) * x_mask
+
+         b, c, t = x0.shape
+         h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, cx?, t] -> [b, c, t, ?]
+
+         unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
+         unnormalized_heights = h[..., self.num_bins:2 * self.num_bins] / math.sqrt(self.filter_channels)
+         unnormalized_derivatives = h[..., 2 * self.num_bins:]
+
+         x1, logabsdet = piecewise_rational_quadratic_transform(x1,
+                                                                unnormalized_widths,
+                                                                unnormalized_heights,
+                                                                unnormalized_derivatives,
+                                                                inverse=reverse,
+                                                                tails='linear',
+                                                                tail_bound=self.tail_bound)
+
+         x = torch.cat([x0, x1], 1) * x_mask
+         logdet = torch.sum(logabsdet * x_mask, [1, 2])
+         if not reverse:
+             return x, logdet
+         else:
+             return x
monotonic_align/__init__.py ADDED
@@ -0,0 +1,19 @@
+ import numpy as np
+ import torch
+ from .monotonic_align.core import maximum_path_c
+
+
+ def maximum_path(neg_cent, mask):
+     """ Cython optimized version.
+     neg_cent: [b, t_t, t_s]
+     mask: [b, t_t, t_s]
+     """
+     device = neg_cent.device
+     dtype = neg_cent.dtype
+     neg_cent = neg_cent.data.cpu().numpy().astype(np.float32)
+     path = np.zeros(neg_cent.shape, dtype=np.int32)
+
+     t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32)
+     t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32)
+     maximum_path_c(path, neg_cent, t_t_max, t_s_max)
+     return torch.from_numpy(path).to(device=device, dtype=dtype)
monotonic_align/core.pyx ADDED
@@ -0,0 +1,42 @@
+ cimport cython
+ from cython.parallel import prange
+
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil:
+     cdef int x
+     cdef int y
+     cdef float v_prev
+     cdef float v_cur
+     cdef float tmp
+     cdef int index = t_x - 1
+
+     for y in range(t_y):
+         for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
+             if x == y:
+                 v_cur = max_neg_val
+             else:
+                 v_cur = value[y-1, x]
+             if x == 0:
+                 if y == 0:
+                     v_prev = 0.
+                 else:
+                     v_prev = max_neg_val
+             else:
+                 v_prev = value[y-1, x-1]
+             value[y, x] += max(v_prev, v_cur)
+
+     for y in range(t_y - 1, -1, -1):
+         path[y, index] = 1
+         if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
+             index = index - 1
+
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil:
+     cdef int b = paths.shape[0]
+     cdef int i
+     for i in prange(b, nogil=True):
+         maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i])
monotonic_align/setup.py ADDED
@@ -0,0 +1,9 @@
+ from distutils.core import setup
+ from Cython.Build import cythonize
+ import numpy
+
+ setup(
+     name='monotonic_align',
+     ext_modules=cythonize("core.pyx"),
+     include_dirs=[numpy.get_include()]
+ )
preprocess.py ADDED
@@ -0,0 +1,25 @@
+ import argparse
+ import text
+ from utils import load_filepaths_and_text
+
+ if __name__ == '__main__':
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--out_extension", default="cleaned")
+     parser.add_argument("--text_index", default=1, type=int)
+     parser.add_argument("--filelists", nargs="+", default=["filelists/ljs_audio_text_val_filelist.txt", "filelists/ljs_audio_text_test_filelist.txt"])
+     parser.add_argument("--text_cleaners", nargs="+", default=["english_cleaners2"])
+
+     args = parser.parse_args()
+
+     for filelist in args.filelists:
+         print("START:", filelist)
+         filepaths_and_text = load_filepaths_and_text(filelist)
+         for i in range(len(filepaths_and_text)):
+             original_text = filepaths_and_text[i][args.text_index]
+             cleaned_text = text._clean_text(original_text, args.text_cleaners)
+             filepaths_and_text[i][args.text_index] = cleaned_text
+
+         new_filelist = filelist + "." + args.out_extension
+         with open(new_filelist, "w", encoding="utf-8") as f:
+             f.writelines(["|".join(x) + "\n" for x in filepaths_and_text])
preprocess_wave.py ADDED
@@ -0,0 +1,116 @@
+ import os
+ import librosa
+ import pyworld
+ import utils
+ import numpy as np
+ from scipy.io import wavfile
+
+
+ class FeatureInput(object):
+     def __init__(self, samplerate=16000, hop_size=160):
+         self.fs = samplerate
+         self.hop = hop_size
+
+         self.f0_bin = 256
+         self.f0_max = 1100.0
+         self.f0_min = 50.0
+         self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
+         self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
+
+     def compute_f0(self, path):
+         x, sr = librosa.load(path, sr=self.fs)
+         assert sr == self.fs
+         f0, t = pyworld.dio(
+             x.astype(np.double),
+             fs=sr,
+             f0_ceil=800,
+             frame_period=1000 * self.hop / sr,
+         )
+         f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.fs)
+         for index, pitch in enumerate(f0):
+             f0[index] = round(pitch, 1)
+         return f0
+
+     # for numpy  # code from diffsinger
+     def coarse_f0(self, f0):
+         f0_mel = 1127 * np.log(1 + f0 / 700)
+         f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * (
+             self.f0_bin - 2
+         ) / (self.f0_mel_max - self.f0_mel_min) + 1
+
+         # use 0 or 1
+         f0_mel[f0_mel <= 1] = 1
+         f0_mel[f0_mel > self.f0_bin - 1] = self.f0_bin - 1
+         f0_coarse = np.rint(f0_mel).astype(int)  # np.int is deprecated; the builtin int is equivalent
+         assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (
+             f0_coarse.max(),
+             f0_coarse.min(),
+         )
+         return f0_coarse
+
+     # for tensor  # code from diffsinger
+     def coarse_f0_ts(self, f0):
+         f0_mel = 1127 * (1 + f0 / 700).log()
+         f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * (
+             self.f0_bin - 2
+         ) / (self.f0_mel_max - self.f0_mel_min) + 1
+
+         # use 0 or 1
+         f0_mel[f0_mel <= 1] = 1
+         f0_mel[f0_mel > self.f0_bin - 1] = self.f0_bin - 1
+         f0_coarse = (f0_mel + 0.5).long()
+         assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (
+             f0_coarse.max(),
+             f0_coarse.min(),
+         )
+         return f0_coarse
+
+     def save_wav(self, wav, path):
+         wav *= 32767 / max(0.01, np.max(np.abs(wav))) * 0.6
+         wavfile.write(path, self.fs, wav.astype(np.int16))
+
+
+ if __name__ == "__main__":
+     wavPath = "./data/waves"
+     outPath = "./data/label"
+     if not os.path.exists(outPath):
+         os.mkdir(outPath)
+
+     # define model and load checkpoint
+     hps = utils.get_hparams_from_file("./configs/singing_base.json")
+     featureInput = FeatureInput(hps.data.sampling_rate, hps.data.hop_length)
+     vits_file = open("./filelists/vc_file.txt", "w", encoding="utf-8")
+
+     for spks in os.listdir(wavPath):
+         if os.path.isdir(f"./{wavPath}/{spks}"):
+             os.makedirs(f"./{outPath}/{spks}", exist_ok=True)  # exist_ok so reruns don't crash
+             for file in os.listdir(f"./{wavPath}/{spks}"):
+                 if file.endswith(".wav"):
+                     file = file[:-4]
+                     audio_path = f"./{wavPath}/{spks}/{file}.wav"
+                     featur_pit = featureInput.compute_f0(audio_path)
+                     coarse_pit = featureInput.coarse_f0(featur_pit)
+                     np.save(
+                         f"{outPath}/{spks}/{file}_pitch.npy",
+                         coarse_pit,
+                         allow_pickle=False,
+                     )
+                     np.save(
+                         f"{outPath}/{spks}/{file}_nsff0.npy",
+                         featur_pit,
+                         allow_pickle=False,
+                     )
+
+                     path_audio = f"./data/waves/{spks}/{file}.wav"
+                     path_spkid = f"./data/spkid/{spks}.npy"
+                     path_label = (
+                         f"./data/phone/{spks}/{file}.npy"  # phone means ppg & hubert
+                     )
+                     path_pitch = f"./data/label/{spks}/{file}_pitch.npy"
+                     path_nsff0 = f"./data/label/{spks}/{file}_nsff0.npy"
+                     print(
+                         f"{path_audio}|{path_spkid}|{path_label}|{path_pitch}|{path_nsff0}",
+                         file=vits_file,
+                     )
+
+     vits_file.close()
requirements.txt ADDED
@@ -0,0 +1,12 @@
+ Cython==0.29.21
+ librosa==0.8.0
+ matplotlib==3.3.1
+ numpy==1.18.5
+ phonemizer==2.2.1
+ scipy==1.5.2
+ tensorboard==2.3.0
+ torch
+ torchvision
+ Unidecode==1.1.1
+ torchaudio
+ pyworld
text/LICENSE ADDED
@@ -0,0 +1,19 @@
+ Copyright (c) 2017 Keith Ito
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
text/__init__.py ADDED
@@ -0,0 +1,54 @@
+ """ from https://github.com/keithito/tacotron """
+ from text import cleaners
+ from text.symbols import symbols
+
+
+ # Mappings from symbol to numeric ID and vice versa:
+ _symbol_to_id = {s: i for i, s in enumerate(symbols)}
+ _id_to_symbol = {i: s for i, s in enumerate(symbols)}
+
+
+ def text_to_sequence(text, cleaner_names):
+     '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
+     Args:
+         text: string to convert to a sequence
+         cleaner_names: names of the cleaner functions to run the text through
+     Returns:
+         List of integers corresponding to the symbols in the text
+     '''
+     sequence = []
+
+     clean_text = _clean_text(text, cleaner_names)
+     for symbol in clean_text:
+         symbol_id = _symbol_to_id[symbol]
+         sequence += [symbol_id]
+     return sequence
+
+
+ def cleaned_text_to_sequence(cleaned_text):
+     '''Converts a string of already-cleaned text to a sequence of IDs corresponding to the symbols in the text.
+     Args:
+         cleaned_text: string to convert to a sequence
+     Returns:
+         List of integers corresponding to the symbols in the text
+     '''
+     sequence = [_symbol_to_id[symbol] for symbol in cleaned_text]
+     return sequence
+
+
+ def sequence_to_text(sequence):
+     '''Converts a sequence of IDs back to a string'''
+     result = ''
+     for symbol_id in sequence:
+         s = _id_to_symbol[symbol_id]
+         result += s
+     return result
+
+
+ def _clean_text(text, cleaner_names):
+     for name in cleaner_names:
+         cleaner = getattr(cleaners, name)
+         if not cleaner:
+             raise Exception('Unknown cleaner: %s' % name)
+         text = cleaner(text)
+     return text
text/cleaners.py ADDED
@@ -0,0 +1,100 @@
+ """ from https://github.com/keithito/tacotron """
+
+ '''
+ Cleaners are transformations that run over the input text at both training and eval time.
+
+ Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
+ hyperparameter. Some cleaners are English-specific. You'll typically want to use:
+   1. "english_cleaners" for English text
+   2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
+      the Unidecode library (https://pypi.python.org/pypi/Unidecode)
+   3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
+      the symbols in symbols.py to match your data).
+ '''
+
+ import re
+ from unidecode import unidecode
+ from phonemizer import phonemize
+
+
+ # Regular expression matching whitespace:
+ _whitespace_re = re.compile(r'\s+')
+
+ # List of (regular expression, replacement) pairs for abbreviations:
+ _abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
+     ('mrs', 'misess'),
+     ('mr', 'mister'),
+     ('dr', 'doctor'),
+     ('st', 'saint'),
+     ('co', 'company'),
+     ('jr', 'junior'),
+     ('maj', 'major'),
+     ('gen', 'general'),
+     ('drs', 'doctors'),
+     ('rev', 'reverend'),
+     ('lt', 'lieutenant'),
+     ('hon', 'honorable'),
+     ('sgt', 'sergeant'),
+     ('capt', 'captain'),
+     ('esq', 'esquire'),
+     ('ltd', 'limited'),
+     ('col', 'colonel'),
+     ('ft', 'fort'),
+ ]]
+
+
+ def expand_abbreviations(text):
+     for regex, replacement in _abbreviations:
+         text = re.sub(regex, replacement, text)
+     return text
+
+
+ def expand_numbers(text):
+     # NOTE: normalize_numbers is neither defined nor imported in this commit
+     # (in keithito's tacotron it lives in text/numbers.py); this helper is
+     # unused by the cleaners below.
+     return normalize_numbers(text)
+
+
+ def lowercase(text):
+     return text.lower()
+
+
+ def collapse_whitespace(text):
+     return re.sub(_whitespace_re, ' ', text)
+
+
+ def convert_to_ascii(text):
+     return unidecode(text)
+
+
+ def basic_cleaners(text):
+     '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
+     text = lowercase(text)
+     text = collapse_whitespace(text)
+     return text
+
+
+ def transliteration_cleaners(text):
+     '''Pipeline for non-English text that transliterates to ASCII.'''
+     text = convert_to_ascii(text)
+     text = lowercase(text)
+     text = collapse_whitespace(text)
+     return text
+
+
+ def english_cleaners(text):
+     '''Pipeline for English text, including abbreviation expansion.'''
+     text = convert_to_ascii(text)
+     text = lowercase(text)
+     text = expand_abbreviations(text)
+     phonemes = phonemize(text, language='en-us', backend='espeak', strip=True)
+     phonemes = collapse_whitespace(phonemes)
+     return phonemes
+
+
+ def english_cleaners2(text):
+     '''Pipeline for English text, including abbreviation expansion. + punctuation + stress'''
+     text = convert_to_ascii(text)
+     text = lowercase(text)
+     text = expand_abbreviations(text)
+     phonemes = phonemize(text, language='en-us', backend='espeak', strip=True, preserve_punctuation=True, with_stress=True)
+     phonemes = collapse_whitespace(phonemes)
+     return phonemes
text/symbols.py ADDED
@@ -0,0 +1,16 @@
+ """ from https://github.com/keithito/tacotron """
+
+ '''
+ Defines the set of symbols used in text input to the model.
+ '''
+ _pad = '_'
+ _punctuation = ';:,.!?¡¿—…"«»“” '
+ _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
+ _letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
+
+
+ # Export all symbols:
+ symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa)
+
+ # Special symbol ids
+ SPACE_ID = symbols.index(" ")
train.py ADDED
@@ -0,0 +1,295 @@
+ import os
+ import json
+ import argparse
+ import itertools
+ import math
+ import torch
+ from torch import nn, optim
+ from torch.nn import functional as F
+ from torch.utils.data import DataLoader
+ from torch.utils.tensorboard import SummaryWriter
+ import torch.multiprocessing as mp
+ import torch.distributed as dist
+ from torch.nn.parallel import DistributedDataParallel as DDP
+ from torch.cuda.amp import autocast, GradScaler
+
+ import librosa
+ import logging
+
+ logging.getLogger('numba').setLevel(logging.WARNING)
+
+ import commons
+ import utils
+ from data_utils import (
+     TextAudioLoader,
+     TextAudioCollate,
+     DistributedBucketSampler
+ )
+ from models import (
+     SynthesizerTrn,
+     MultiPeriodDiscriminator,
+ )
+ from losses import (
+     generator_loss,
+     discriminator_loss,
+     feature_loss,
+     kl_loss
+ )
+ from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
+ from text.symbols import symbols
+
+
+ torch.backends.cudnn.benchmark = True
+ global_step = 0
+
+
+ def main():
+     """Assume Single Node Multi GPUs Training Only"""
+     assert torch.cuda.is_available(), "CPU training is not allowed."
+
+     n_gpus = torch.cuda.device_count()
+     os.environ['MASTER_ADDR'] = 'localhost'
+     os.environ['MASTER_PORT'] = '25565'
+
+     hps = utils.get_hparams()
+     mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
+
+
+ def run(rank, n_gpus, hps):
+     global global_step
+     if rank == 0:
+         logger = utils.get_logger(hps.model_dir)
+         logger.info(hps)
+         utils.check_git_hash(hps.model_dir)
+         writer = SummaryWriter(log_dir=hps.model_dir)
+         writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
+
+     dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank)
+     torch.manual_seed(hps.train.seed)
+     torch.cuda.set_device(rank)
+
+     train_dataset = TextAudioLoader(hps.data.training_files, hps.data)
+     train_sampler = DistributedBucketSampler(
+         train_dataset,
+         hps.train.batch_size,
+         [32, 300, 400, 500, 600, 700, 800, 900, 1000],
+         num_replicas=n_gpus,
+         rank=rank,
+         shuffle=True)
+     collate_fn = TextAudioCollate()
+     train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True,
+                               collate_fn=collate_fn, batch_sampler=train_sampler)
+     if rank == 0:
+         eval_dataset = TextAudioLoader(hps.data.validation_files, hps.data)
+         eval_loader = DataLoader(eval_dataset, num_workers=8, shuffle=False,
+                                  batch_size=hps.train.batch_size, pin_memory=True,
+                                  drop_last=False, collate_fn=collate_fn)
+
+     net_g = SynthesizerTrn(
+         len(symbols),
+         hps.data.filter_length // 2 + 1,
+         hps.train.segment_size // hps.data.hop_length,
+         **hps.model).cuda(rank)
+     net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
+     optim_g = torch.optim.AdamW(
+         net_g.parameters(),
+         hps.train.learning_rate,
+         betas=hps.train.betas,
+         eps=hps.train.eps)
+     optim_d = torch.optim.AdamW(
+         net_d.parameters(),
+         hps.train.learning_rate,
+         betas=hps.train.betas,
+         eps=hps.train.eps)
+     net_g = DDP(net_g, device_ids=[rank])
+     net_d = DDP(net_d, device_ids=[rank])
+
+     try:
+         _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g)
+         _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d)
+         global_step = (epoch_str - 1) * len(train_loader)
+     except Exception:
+         epoch_str = 1
+         global_step = 0
+
+     scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
+     scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
+
+     scaler = GradScaler(enabled=hps.train.fp16_run)
+
+     for epoch in range(epoch_str, hps.train.epochs + 1):
+         if rank == 0:
+             train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
+         else:
+             train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None)
+         scheduler_g.step()
+         scheduler_d.step()
+
+
+ def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
+     net_g, net_d = nets
+     optim_g, optim_d = optims
+     scheduler_g, scheduler_d = schedulers
+     train_loader, eval_loader = loaders
+     if writers is not None:
+         writer, writer_eval = writers
+
+     train_loader.batch_sampler.set_epoch(epoch)
+     global global_step
+
+     net_g.train()
+     net_d.train()
+     for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, pitch) in enumerate(train_loader):
+         x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
+         spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
+         y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
+         pitch = pitch.cuda(rank, non_blocking=True)
+         with autocast(enabled=hps.train.fp16_run):
+             # SynthesizerTrn.forward also returns the pitch loss l_pitch; it
+             # has to be unpacked here so the return values line up (it is not
+             # yet added to the generator loss below)
+             y_hat, l_length, l_pitch, attn, ids_slice, x_mask, z_mask, \
+                 (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(x, x_lengths, spec, spec_lengths, pitch)
+
+             mel = spec_to_mel_torch(
+                 spec,
+                 hps.data.filter_length,
+                 hps.data.n_mel_channels,
+                 hps.data.sampling_rate,
+                 hps.data.mel_fmin,
+                 hps.data.mel_fmax)
+             y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
+             y_hat_mel = mel_spectrogram_torch(
+                 y_hat.squeeze(1),
+                 hps.data.filter_length,
+                 hps.data.n_mel_channels,
+                 hps.data.sampling_rate,
+                 hps.data.hop_length,
+                 hps.data.win_length,
+                 hps.data.mel_fmin,
+                 hps.data.mel_fmax
+             )
+
+             y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size)  # slice
+
+             # Discriminator
+             y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
+             with autocast(enabled=False):
+                 loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
+                 loss_disc_all = loss_disc
+         optim_d.zero_grad()
+         scaler.scale(loss_disc_all).backward()
+         scaler.unscale_(optim_d)
+         grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
+         scaler.step(optim_d)
+
+         with autocast(enabled=hps.train.fp16_run):
+             # Generator
+             y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
+             with autocast(enabled=False):
+                 loss_dur = torch.sum(l_length.float())
+                 loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
+                 loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
+
+                 loss_fm = feature_loss(fmap_r, fmap_g)
+                 loss_gen, losses_gen = generator_loss(y_d_hat_g)
+                 loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
+         optim_g.zero_grad()
+         scaler.scale(loss_gen_all).backward()
+         scaler.unscale_(optim_g)
+         grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
+         scaler.step(optim_g)
+         scaler.update()
+
+         if rank == 0:
+             if global_step % hps.train.log_interval == 0:
+                 lr = optim_g.param_groups[0]['lr']
+                 losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl]
+                 logger.info('Train Epoch: {} [{:.0f}%]'.format(
+                     epoch,
+                     100. * batch_idx / len(train_loader)))
+                 logger.info([x.item() for x in losses] + [global_step, lr])
+
+                 scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
+                 scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl})
+
+                 scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
+                 scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
+                 scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
+                 image_dict = {
+                     "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
+                     "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
+                     "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
+                     "all/attn": utils.plot_alignment_to_numpy(attn[0, 0].data.cpu().numpy())
+                 }
+                 utils.summarize(
+                     writer=writer,
+                     global_step=global_step,
+                     images=image_dict,
+                     scalars=scalar_dict)
+
+             if global_step % hps.train.eval_interval == 0:
+                 evaluate(hps, net_g, eval_loader, writer_eval)
+                 utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
+                 utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
+         global_step += 1
+
+     if rank == 0:
+         logger.info('====> Epoch: {}'.format(epoch))
+
+
+ def evaluate(hps, generator, eval_loader, writer_eval):
+     generator.eval()
+     with torch.no_grad():
+         for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, pitch) in enumerate(eval_loader):
+             x, x_lengths = x.cuda(0), x_lengths.cuda(0)
+             spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0)
+             y, y_lengths = y.cuda(0), y_lengths.cuda(0)
+             pitch = pitch.cuda(0)
+
+             # only the first sample of the first batch is evaluated
+             x = x[:1]
+             x_lengths = x_lengths[:1]
+             spec = spec[:1]
+             spec_lengths = spec_lengths[:1]
+             y = y[:1]
+             y_lengths = y_lengths[:1]
+             break
+         y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, pitch, max_len=1000)
+         y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length
+
+         mel = spec_to_mel_torch(
+             spec,
+             hps.data.filter_length,
+             hps.data.n_mel_channels,
+             hps.data.sampling_rate,
+             hps.data.mel_fmin,
+             hps.data.mel_fmax)
+         y_hat_mel = mel_spectrogram_torch(
+             y_hat.squeeze(1).float(),
+             hps.data.filter_length,
+             hps.data.n_mel_channels,
+             hps.data.sampling_rate,
+             hps.data.hop_length,
+             hps.data.win_length,
+             hps.data.mel_fmin,
+             hps.data.mel_fmax
+         )
+     image_dict = {
+         "gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy())
+     }
+     audio_dict = {
+         "gen/audio": y_hat[0, :, :y_hat_lengths[0]]
+     }
+     if global_step == 0:
+         image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())})
+         audio_dict.update({"gt/audio": y[0, :, :y_lengths[0]]})
+
+     utils.summarize(
+         writer=writer_eval,
+         global_step=global_step,
+         images=image_dict,
+         audios=audio_dict,
+         audio_sampling_rate=hps.data.sampling_rate
+     )
+     generator.train()
+
+
+ if __name__ == "__main__":
+     main()
train_ms.py ADDED
@@ -0,0 +1,296 @@
+ import os
+ import json
+ import argparse
+ import itertools
+ import math
+ import torch
+ from torch import nn, optim
+ from torch.nn import functional as F
+ from torch.utils.data import DataLoader
+ from torch.utils.tensorboard import SummaryWriter
+ import torch.multiprocessing as mp
+ import torch.distributed as dist
+ from torch.nn.parallel import DistributedDataParallel as DDP
+ from torch.cuda.amp import autocast, GradScaler
+
+ import commons
+ import utils
+ from data_utils import (
+     TextAudioSpeakerLoader,
+     TextAudioSpeakerCollate,
+     DistributedBucketSampler
+ )
+ from models import (
+     SynthesizerTrn,
+     MultiPeriodDiscriminator,
+ )
+ from losses import (
+     generator_loss,
+     discriminator_loss,
+     feature_loss,
+     kl_loss
+ )
+ from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
+ from text.symbols import symbols
+
+
+ torch.backends.cudnn.benchmark = True
+ global_step = 0
+
+
+ def main():
+   """Assume Single Node Multi GPUs Training Only"""
+   assert torch.cuda.is_available(), "CPU training is not allowed."
+
+   n_gpus = torch.cuda.device_count()
+   os.environ['MASTER_ADDR'] = 'localhost'
+   os.environ['MASTER_PORT'] = '25565'
+
+   hps = utils.get_hparams()
+   mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
+
+
+ def run(rank, n_gpus, hps):
+   global global_step
+   if rank == 0:
+     logger = utils.get_logger(hps.model_dir)
+     logger.info(hps)
+     utils.check_git_hash(hps.model_dir)
+     writer = SummaryWriter(log_dir=hps.model_dir)
+     writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
+
+   dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank)
+   torch.manual_seed(hps.train.seed)
+   torch.cuda.set_device(rank)
+
+   train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
+   train_sampler = DistributedBucketSampler(
+       train_dataset,
+       hps.train.batch_size,
+       [32, 300, 400, 500, 600, 700, 800, 900, 1000],
+       num_replicas=n_gpus,
+       rank=rank,
+       shuffle=True)
+   collate_fn = TextAudioSpeakerCollate()
+   train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True,
+                             collate_fn=collate_fn, batch_sampler=train_sampler)
+   if rank == 0:
+     eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)
+     eval_loader = DataLoader(eval_dataset, num_workers=8, shuffle=False,
+                              batch_size=hps.train.batch_size, pin_memory=True,
+                              drop_last=False, collate_fn=collate_fn)
+
+   net_g = SynthesizerTrn(
+       len(symbols),
+       hps.data.filter_length // 2 + 1,
+       hps.train.segment_size // hps.data.hop_length,
+       n_speakers=hps.data.n_speakers,
+       **hps.model).cuda(rank)
+   net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
+   optim_g = torch.optim.AdamW(
+       net_g.parameters(),
+       hps.train.learning_rate,
+       betas=hps.train.betas,
+       eps=hps.train.eps)
+   optim_d = torch.optim.AdamW(
+       net_d.parameters(),
+       hps.train.learning_rate,
+       betas=hps.train.betas,
+       eps=hps.train.eps)
+   net_g = DDP(net_g, device_ids=[rank])
+   net_d = DDP(net_d, device_ids=[rank])
+
+   try:
+     _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g)
+     _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d)
+     global_step = (epoch_str - 1) * len(train_loader)
+   except Exception:
+     # no usable checkpoint found: start from scratch
+     epoch_str = 1
+     global_step = 0
+
+   scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
+   scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
+
+   scaler = GradScaler(enabled=hps.train.fp16_run)
+
+   for epoch in range(epoch_str, hps.train.epochs + 1):
+     if rank == 0:
+       train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
+     else:
+       train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None)
+     scheduler_g.step()
+     scheduler_d.step()
+
+
+ def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
+   net_g, net_d = nets
+   optim_g, optim_d = optims
+   scheduler_g, scheduler_d = schedulers
+   train_loader, eval_loader = loaders
+   if writers is not None:
+     writer, writer_eval = writers
+
+   train_loader.batch_sampler.set_epoch(epoch)
+   global global_step
+
+   net_g.train()
+   net_d.train()
+   for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, pitch, speakers) in enumerate(train_loader):
+     x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
+     spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
+     y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
+     speakers = speakers.cuda(rank, non_blocking=True)
+     pitch = pitch.cuda(rank, non_blocking=True)
+
+     with autocast(enabled=hps.train.fp16_run):
+       y_hat, l_length, l_pitch, attn, ids_slice, x_mask, z_mask, \
+           (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(x, x_lengths, spec, spec_lengths, pitch, speakers)
+
+       mel = spec_to_mel_torch(
+           spec,
+           hps.data.filter_length,
+           hps.data.n_mel_channels,
+           hps.data.sampling_rate,
+           hps.data.mel_fmin,
+           hps.data.mel_fmax)
+       y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
+       y_hat_mel = mel_spectrogram_torch(
+           y_hat.squeeze(1),
+           hps.data.filter_length,
+           hps.data.n_mel_channels,
+           hps.data.sampling_rate,
+           hps.data.hop_length,
+           hps.data.win_length,
+           hps.data.mel_fmin,
+           hps.data.mel_fmax
+       )
+
+       y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size)  # slice
+
+       # Discriminator
+       y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
+       with autocast(enabled=False):
+         loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
+         loss_disc_all = loss_disc
+     optim_d.zero_grad()
+     scaler.scale(loss_disc_all).backward()
+     scaler.unscale_(optim_d)
+     grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
+     scaler.step(optim_d)
+
+     with autocast(enabled=hps.train.fp16_run):
+       # Generator
+       y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
+       with autocast(enabled=False):
+         loss_dur = torch.sum(l_length.float())
+         loss_pitch = torch.sum(l_pitch.float())
+         loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
+         loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
+
+         loss_fm = feature_loss(fmap_r, fmap_g)
+         loss_gen, losses_gen = generator_loss(y_d_hat_g)
+         loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl + loss_pitch
+     optim_g.zero_grad()
+     scaler.scale(loss_gen_all).backward()
+     scaler.unscale_(optim_g)
+     grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
+     scaler.step(optim_g)
+     scaler.update()
+
+     if rank == 0:
+       if global_step % hps.train.log_interval == 0:
+         lr = optim_g.param_groups[0]['lr']
+         losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl, loss_pitch]
+         logger.info('Train Epoch: {} [{:.0f}%]'.format(
+             epoch,
+             100. * batch_idx / len(train_loader)))
+         logger.info([x.item() for x in losses] + [global_step, lr])
+
+         scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
+         scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl, "loss/g/pitch": loss_pitch})
+
+         scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
+         scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
+         scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
+         image_dict = {
+             "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
+             "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
+             "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
+             "all/attn": utils.plot_alignment_to_numpy(attn[0, 0].data.cpu().numpy())
+         }
+         utils.summarize(
+             writer=writer,
+             global_step=global_step,
+             images=image_dict,
+             scalars=scalar_dict)
+
+       if global_step % hps.train.eval_interval == 0:
+         evaluate(hps, net_g, eval_loader, writer_eval)
+         utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
+         utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
+     global_step += 1
+
+   if rank == 0:
+     logger.info('====> Epoch: {}'.format(epoch))
+
+
+ def evaluate(hps, generator, eval_loader, writer_eval):
+   generator.eval()
+   with torch.no_grad():
+     for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, pitch, speakers) in enumerate(eval_loader):
+       x, x_lengths = x.cuda(0), x_lengths.cuda(0)
+       spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0)
+       y, y_lengths = y.cuda(0), y_lengths.cuda(0)
+       speakers = speakers.cuda(0)
+       pitch = pitch.cuda(0)
+       # evaluate only the first sample of the first batch
+       x = x[:1]
+       x_lengths = x_lengths[:1]
+       spec = spec[:1]
+       spec_lengths = spec_lengths[:1]
+       y = y[:1]
+       y_lengths = y_lengths[:1]
+       pitch = pitch[:1]
+       speakers = speakers[:1]
+       break
+     y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, pitch, speakers, max_len=1000)
+     y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length
+
+     mel = spec_to_mel_torch(
+         spec,
+         hps.data.filter_length,
+         hps.data.n_mel_channels,
+         hps.data.sampling_rate,
+         hps.data.mel_fmin,
+         hps.data.mel_fmax)
+     y_hat_mel = mel_spectrogram_torch(
+         y_hat.squeeze(1).float(),
+         hps.data.filter_length,
+         hps.data.n_mel_channels,
+         hps.data.sampling_rate,
+         hps.data.hop_length,
+         hps.data.win_length,
+         hps.data.mel_fmin,
+         hps.data.mel_fmax
+     )
+   image_dict = {
+     "gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy())
+   }
+   audio_dict = {
+     "gen/audio": y_hat[0, :, :y_hat_lengths[0]]
+   }
+   if global_step == 0:
+     image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())})
+     audio_dict.update({"gt/audio": y[0, :, :y_lengths[0]]})
+
+   utils.summarize(
+       writer=writer_eval,
+       global_step=global_step,
+       images=image_dict,
+       audios=audio_dict,
+       audio_sampling_rate=hps.data.sampling_rate
+   )
+   generator.train()
+
+
+ if __name__ == "__main__":
+   main()
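train_ms.py is the multi-speaker variant of the trainer above: batches additionally carry a `speakers` tensor, `SynthesizerTrn` is built with `n_speakers`, and a `loss/g/pitch` term joins the usual VITS losses. A hypothetical launch, by analogy with the flags defined in `utils.get_hparams()` and the config shipped with this repo, would be `python train_ms.py -c configs/vctk_base.json -m vctk_run`; `mp.spawn` then starts one `run()` process per visible GPU, with rank 0 owning logging, evaluation, and checkpointing.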
transforms.py ADDED
@@ -0,0 +1,193 @@
+ import torch
+ from torch.nn import functional as F
+
+ import numpy as np
+
+
+ DEFAULT_MIN_BIN_WIDTH = 1e-3
+ DEFAULT_MIN_BIN_HEIGHT = 1e-3
+ DEFAULT_MIN_DERIVATIVE = 1e-3
+
+
+ def piecewise_rational_quadratic_transform(inputs,
+                                            unnormalized_widths,
+                                            unnormalized_heights,
+                                            unnormalized_derivatives,
+                                            inverse=False,
+                                            tails=None,
+                                            tail_bound=1.,
+                                            min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+                                            min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+                                            min_derivative=DEFAULT_MIN_DERIVATIVE):
+     if tails is None:
+         spline_fn = rational_quadratic_spline
+         spline_kwargs = {}
+     else:
+         spline_fn = unconstrained_rational_quadratic_spline
+         spline_kwargs = {
+             'tails': tails,
+             'tail_bound': tail_bound
+         }
+
+     outputs, logabsdet = spline_fn(
+         inputs=inputs,
+         unnormalized_widths=unnormalized_widths,
+         unnormalized_heights=unnormalized_heights,
+         unnormalized_derivatives=unnormalized_derivatives,
+         inverse=inverse,
+         min_bin_width=min_bin_width,
+         min_bin_height=min_bin_height,
+         min_derivative=min_derivative,
+         **spline_kwargs
+     )
+     return outputs, logabsdet
+
+
+ def searchsorted(bin_locations, inputs, eps=1e-6):
+     bin_locations[..., -1] += eps
+     return torch.sum(
+         inputs[..., None] >= bin_locations,
+         dim=-1
+     ) - 1
+
+
+ def unconstrained_rational_quadratic_spline(inputs,
+                                             unnormalized_widths,
+                                             unnormalized_heights,
+                                             unnormalized_derivatives,
+                                             inverse=False,
+                                             tails='linear',
+                                             tail_bound=1.,
+                                             min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+                                             min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+                                             min_derivative=DEFAULT_MIN_DERIVATIVE):
+     inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
+     outside_interval_mask = ~inside_interval_mask
+
+     outputs = torch.zeros_like(inputs)
+     logabsdet = torch.zeros_like(inputs)
+
+     if tails == 'linear':
+         unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
+         constant = np.log(np.exp(1 - min_derivative) - 1)
+         unnormalized_derivatives[..., 0] = constant
+         unnormalized_derivatives[..., -1] = constant
+
+         outputs[outside_interval_mask] = inputs[outside_interval_mask]
+         logabsdet[outside_interval_mask] = 0
+     else:
+         raise RuntimeError('{} tails are not implemented.'.format(tails))
+
+     outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
+         inputs=inputs[inside_interval_mask],
+         unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
+         unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
+         unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
+         inverse=inverse,
+         left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
+         min_bin_width=min_bin_width,
+         min_bin_height=min_bin_height,
+         min_derivative=min_derivative
+     )
+
+     return outputs, logabsdet
+
+
+ def rational_quadratic_spline(inputs,
+                               unnormalized_widths,
+                               unnormalized_heights,
+                               unnormalized_derivatives,
+                               inverse=False,
+                               left=0., right=1., bottom=0., top=1.,
+                               min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+                               min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+                               min_derivative=DEFAULT_MIN_DERIVATIVE):
+     if torch.min(inputs) < left or torch.max(inputs) > right:
+         raise ValueError('Input to a transform is not within its domain')
+
+     num_bins = unnormalized_widths.shape[-1]
+
+     if min_bin_width * num_bins > 1.0:
+         raise ValueError('Minimal bin width too large for the number of bins')
+     if min_bin_height * num_bins > 1.0:
+         raise ValueError('Minimal bin height too large for the number of bins')
+
+     widths = F.softmax(unnormalized_widths, dim=-1)
+     widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
+     cumwidths = torch.cumsum(widths, dim=-1)
+     cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
+     cumwidths = (right - left) * cumwidths + left
+     cumwidths[..., 0] = left
+     cumwidths[..., -1] = right
+     widths = cumwidths[..., 1:] - cumwidths[..., :-1]
+
+     derivatives = min_derivative + F.softplus(unnormalized_derivatives)
+
+     heights = F.softmax(unnormalized_heights, dim=-1)
+     heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
+     cumheights = torch.cumsum(heights, dim=-1)
+     cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
+     cumheights = (top - bottom) * cumheights + bottom
+     cumheights[..., 0] = bottom
+     cumheights[..., -1] = top
+     heights = cumheights[..., 1:] - cumheights[..., :-1]
+
+     if inverse:
+         bin_idx = searchsorted(cumheights, inputs)[..., None]
+     else:
+         bin_idx = searchsorted(cumwidths, inputs)[..., None]
+
+     input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
+     input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
+
+     input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
+     delta = heights / widths
+     input_delta = delta.gather(-1, bin_idx)[..., 0]
+
+     input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
+     input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
+
+     input_heights = heights.gather(-1, bin_idx)[..., 0]
+
+     if inverse:
+         a = (((inputs - input_cumheights) * (input_derivatives
+                                              + input_derivatives_plus_one
+                                              - 2 * input_delta)
+               + input_heights * (input_delta - input_derivatives)))
+         b = (input_heights * input_derivatives
+              - (inputs - input_cumheights) * (input_derivatives
+                                               + input_derivatives_plus_one
+                                               - 2 * input_delta))
+         c = -input_delta * (inputs - input_cumheights)
+
+         discriminant = b.pow(2) - 4 * a * c
+         assert (discriminant >= 0).all()
+
+         root = (2 * c) / (-b - torch.sqrt(discriminant))
+         outputs = root * input_bin_widths + input_cumwidths
+
+         theta_one_minus_theta = root * (1 - root)
+         denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
+                                      * theta_one_minus_theta)
+         derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
+                                                      + 2 * input_delta * theta_one_minus_theta
+                                                      + input_derivatives * (1 - root).pow(2))
+         logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
+
+         return outputs, -logabsdet
+     else:
+         theta = (inputs - input_cumwidths) / input_bin_widths
+         theta_one_minus_theta = theta * (1 - theta)
+
+         numerator = input_heights * (input_delta * theta.pow(2)
+                                      + input_derivatives * theta_one_minus_theta)
+         denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
+                                      * theta_one_minus_theta)
+         outputs = input_cumheights + numerator / denominator
+
+         derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
+                                                      + 2 * input_delta * theta_one_minus_theta
+                                                      + input_derivatives * (1 - theta).pow(2))
+         logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
+
+         return outputs, logabsdet
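A usage sketch for the spline functions above (shapes and values here are illustrative assumptions, not part of the repo). With `tails='linear'`, points outside `[-tail_bound, tail_bound]` pass through unchanged, and `unnormalized_derivatives` carries `num_bins - 1` inner-knot values because the two boundary derivatives are padded in:

    import torch
    from transforms import piecewise_rational_quadratic_transform

    num_bins = 10
    x = torch.rand(2, 100) * 2 - 1         # batch of points in [-1, 1)
    w = torch.randn(2, 100, num_bins)      # unnormalized bin widths
    h = torch.randn(2, 100, num_bins)      # unnormalized bin heights
    d = torch.randn(2, 100, num_bins - 1)  # unnormalized inner-knot derivatives

    y, logabsdet = piecewise_rational_quadratic_transform(
        x, w, h, d, inverse=False, tails='linear', tail_bound=1.)
    x_rec, _ = piecewise_rational_quadratic_transform(
        y, w, h, d, inverse=True, tails='linear', tail_bound=1.)
    # x_rec matches x up to numerical error: the spline is exactly invertible,
    # and logabsdet is the log|det J| term used in flow likelihoods.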
utils.py ADDED
@@ -0,0 +1,261 @@
+ import os
+ import glob
+ import sys
+ import argparse
+ import logging
+ import json
+ import subprocess
+ import numpy as np
+ from scipy.io.wavfile import read
+ import torch
+
+ MATPLOTLIB_FLAG = False
+
+ logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
+ logger = logging
+
+
+ def load_checkpoint(checkpoint_path, model, optimizer=None):
+   assert os.path.isfile(checkpoint_path)
+   checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
+   iteration = checkpoint_dict['iteration']
+   learning_rate = checkpoint_dict['learning_rate']
+   if optimizer is not None:
+     optimizer.load_state_dict(checkpoint_dict['optimizer'])
+   saved_state_dict = checkpoint_dict['model']
+   if hasattr(model, 'module'):
+     state_dict = model.module.state_dict()
+   else:
+     state_dict = model.state_dict()
+   new_state_dict = {}
+   for k, v in state_dict.items():
+     try:
+       new_state_dict[k] = saved_state_dict[k]
+     except KeyError:
+       # missing keys keep their freshly initialized values
+       logger.info("%s is not in the checkpoint" % k)
+       new_state_dict[k] = v
+   if hasattr(model, 'module'):
+     model.module.load_state_dict(new_state_dict)
+   else:
+     model.load_state_dict(new_state_dict)
+   logger.info("Loaded checkpoint '{}' (iteration {})".format(
+       checkpoint_path, iteration))
+   return model, optimizer, learning_rate, iteration
+
+
+ def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
+   logger.info("Saving model and optimizer state at iteration {} to {}".format(
+       iteration, checkpoint_path))
+   if hasattr(model, 'module'):
+     state_dict = model.module.state_dict()
+   else:
+     state_dict = model.state_dict()
+   torch.save({'model': state_dict,
+               'iteration': iteration,
+               'optimizer': optimizer.state_dict(),
+               'learning_rate': learning_rate}, checkpoint_path)
+
+
+ def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
+   for k, v in scalars.items():
+     writer.add_scalar(k, v, global_step)
+   for k, v in histograms.items():
+     writer.add_histogram(k, v, global_step)
+   for k, v in images.items():
+     writer.add_image(k, v, global_step, dataformats='HWC')
+   for k, v in audios.items():
+     writer.add_audio(k, v, global_step, audio_sampling_rate)
+
+
+ def latest_checkpoint_path(dir_path, regex="G_*.pth"):
+   f_list = glob.glob(os.path.join(dir_path, regex))
+   f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
+   x = f_list[-1]
+   print(x)
+   return x
+
+
+ def plot_spectrogram_to_numpy(spectrogram):
+   global MATPLOTLIB_FLAG
+   if not MATPLOTLIB_FLAG:
+     import matplotlib
+     matplotlib.use("Agg")
+     MATPLOTLIB_FLAG = True
+     mpl_logger = logging.getLogger('matplotlib')
+     mpl_logger.setLevel(logging.WARNING)
+   import matplotlib.pylab as plt
+
+   fig, ax = plt.subplots(figsize=(10, 2))
+   im = ax.imshow(spectrogram, aspect="auto", origin="lower",
+                  interpolation='none')
+   plt.colorbar(im, ax=ax)
+   plt.xlabel("Frames")
+   plt.ylabel("Channels")
+   plt.tight_layout()
+
+   fig.canvas.draw()
+   data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
+   data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+   plt.close()
+   return data
+
+
+ def plot_alignment_to_numpy(alignment, info=None):
+   global MATPLOTLIB_FLAG
+   if not MATPLOTLIB_FLAG:
+     import matplotlib
+     matplotlib.use("Agg")
+     MATPLOTLIB_FLAG = True
+     mpl_logger = logging.getLogger('matplotlib')
+     mpl_logger.setLevel(logging.WARNING)
+   import matplotlib.pylab as plt
+
+   fig, ax = plt.subplots(figsize=(6, 4))
+   im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
+                  interpolation='none')
+   fig.colorbar(im, ax=ax)
+   xlabel = 'Decoder timestep'
+   if info is not None:
+     xlabel += '\n\n' + info
+   plt.xlabel(xlabel)
+   plt.ylabel('Encoder timestep')
+   plt.tight_layout()
+
+   fig.canvas.draw()
+   data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
+   data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+   plt.close()
+   return data
+
+
+ def load_wav_to_torch(full_path):
+   sampling_rate, data = read(full_path)
+   return torch.FloatTensor(data.astype(np.float32)), sampling_rate
+
+
+ def load_filepaths_and_text(filename, split="|"):
+   with open(filename, encoding='utf-8') as f:
+     filepaths_and_text = [line.strip().split(split) for line in f]
+   return filepaths_and_text
+
+
+ def get_hparams(init=True):
+   parser = argparse.ArgumentParser()
+   parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
+                       help='JSON file for configuration')
+   parser.add_argument('-m', '--model', type=str, required=True,
+                       help='Model name')
+
+   args = parser.parse_args()
+   model_dir = os.path.join("./logs", args.model)
+
+   if not os.path.exists(model_dir):
+     os.makedirs(model_dir)
+
+   config_path = args.config
+   config_save_path = os.path.join(model_dir, "config.json")
+   if init:
+     with open(config_path, "r") as f:
+       data = f.read()
+     with open(config_save_path, "w") as f:
+       f.write(data)
+   else:
+     with open(config_save_path, "r") as f:
+       data = f.read()
+   config = json.loads(data)
+
+   hparams = HParams(**config)
+   hparams.model_dir = model_dir
+   return hparams
+
+
+ def get_hparams_from_dir(model_dir):
+   config_save_path = os.path.join(model_dir, "config.json")
+   with open(config_save_path, "r") as f:
+     data = f.read()
+   config = json.loads(data)
+
+   hparams = HParams(**config)
+   hparams.model_dir = model_dir
+   return hparams
+
+
+ def get_hparams_from_file(config_path):
+   with open(config_path, "r") as f:
+     data = f.read()
+   config = json.loads(data)
+
+   hparams = HParams(**config)
+   return hparams
+
+
+ def check_git_hash(model_dir):
+   source_dir = os.path.dirname(os.path.realpath(__file__))
+   if not os.path.exists(os.path.join(source_dir, ".git")):
+     logger.warning("{} is not a git repository, therefore hash value comparison will be ignored.".format(
+         source_dir))
+     return
+
+   cur_hash = subprocess.getoutput("git rev-parse HEAD")
+
+   path = os.path.join(model_dir, "githash")
+   if os.path.exists(path):
+     saved_hash = open(path).read()
+     if saved_hash != cur_hash:
+       logger.warning("git hash values are different. {}(saved) != {}(current)".format(
+           saved_hash[:8], cur_hash[:8]))
+   else:
+     open(path, "w").write(cur_hash)
+
+
+ def get_logger(model_dir, filename="train.log"):
+   global logger
+   logger = logging.getLogger(os.path.basename(model_dir))
+   logger.setLevel(logging.DEBUG)
+
+   formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
+   if not os.path.exists(model_dir):
+     os.makedirs(model_dir)
+   h = logging.FileHandler(os.path.join(model_dir, filename))
+   h.setLevel(logging.DEBUG)
+   h.setFormatter(formatter)
+   logger.addHandler(h)
+   return logger
+
+
+ class HParams:
+   def __init__(self, **kwargs):
+     for k, v in kwargs.items():
+       if isinstance(v, dict):
+         v = HParams(**v)
+       self[k] = v
+
+   def keys(self):
+     return self.__dict__.keys()
+
+   def items(self):
+     return self.__dict__.items()
+
+   def values(self):
+     return self.__dict__.values()
+
+   def __len__(self):
+     return len(self.__dict__)
+
+   def __getitem__(self, key):
+     return getattr(self, key)
+
+   def __setitem__(self, key, value):
+     return setattr(self, key, value)
+
+   def __contains__(self, key):
+     return key in self.__dict__
+
+   def __repr__(self):
+     return self.__dict__.__repr__()
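A small usage sketch for `HParams` (the values are illustrative): nested dicts from the JSON config become attribute-accessible objects, while dict-style access still works.

    hps = HParams(train={"batch_size": 16}, data={"sampling_rate": 22050})
    assert hps.train.batch_size == 16           # nested dicts become HParams
    assert hps["data"].sampling_rate == 22050   # __getitem__ delegates to getattr
    assert "train" in hps and len(hps) == 2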