zhoucr committed
Commit 7c6b117
1 Parent(s): 13976a0

First model version

.gitattributes CHANGED
@@ -29,3 +29,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ MyDrive/japanese_base/G_42000.pth filter=lfs diff=lfs merge=lfs -text
+ monotonic_align/build/temp.linux-x86_64-3.7/core.o filter=lfs diff=lfs merge=lfs -text
LICENSE.txt ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2021 Jaehyeon Kim
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
MyDrive/japanese_base/G_42000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0bf9b84190ba7dde3c5f888522f91cec2ddfa767c1d959ee93036b30b6440aa
+ size 449797244
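
The three lines above are a Git LFS pointer stored in place of the 449 MB checkpoint: "oid" is the SHA-256 of the real file and "size" its byte count. A minimal sketch for checking that a downloaded copy matches the pointer (the path is just the repo-relative location named above):

import hashlib
import os

# Hedged sketch: verify a fetched checkpoint against the LFS pointer fields.
path = "MyDrive/japanese_base/G_42000.pth"
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert os.path.getsize(path) == 449797244
assert h.hexdigest() == "b0bf9b84190ba7dde3c5f888522f91cec2ddfa767c1d959ee93036b30b6440aa"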
app.py ADDED
@@ -0,0 +1,73 @@
+ import gradio as gr
+ import os
+ os.system('cd monotonic_align && python setup.py build_ext --inplace && cd ..')
+
+
+ import json
+ import math
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+ from torch.utils.data import DataLoader
+
+ import commons
+ import utils
+ from data_utils import TextAudioLoader, TextAudioCollate, TextAudioSpeakerLoader, TextAudioSpeakerCollate
+ from models import SynthesizerTrn
+ from text.symbols import symbols
+ from text import text_to_sequence, cleaned_text_to_sequence
+ from text.cleaners import japanese_cleaners
+ from scipy.io.wavfile import write
+
+
+ def get_text(text, hps):
+   text_norm = text_to_sequence(text, hps.data.text_cleaners)
+   if hps.data.add_blank:
+     text_norm = commons.intersperse(text_norm, 0)
+   text_norm = torch.LongTensor(text_norm)
+   # print(text_norm.shape)
+   return text_norm
+
+ hps = utils.get_hparams_from_file("/mnt/vits_koni/configs/japanese_base.json")
+
+ net_g = SynthesizerTrn(
+   len(symbols),
+   hps.data.filter_length // 2 + 1,
+   hps.train.segment_size // hps.data.hop_length,
+   **hps.model)
+ _ = net_g.eval()
+
+
+ _ = utils.load_checkpoint("/tts_koni/MyDrive/japanese_base/G_42000.pth", net_g, None)
+
+
+ def tts(text):
+   if len(text) > 150:
+     return "Error: Text is too long", None
+   stn_tst = get_text(text, hps)
+
+   with torch.no_grad():
+     x_tst = stn_tst.unsqueeze(0)
+     x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
+     # print(stn_tst.size())
+     audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667, noise_scale_w=0.8, length_scale=2)[0][
+       0, 0].data.float().numpy()
+   return "Success", (hps.data.sampling_rate, audio)
+
+
+
+
+
+
+ app = gr.Blocks()
+ with app:
+   with gr.Tabs():
+     with gr.TabItem("AI koni"):
+       tts_input1 = gr.TextArea(label="Text in Japanese (150 characters max)", value="こんにちは。")
+       # tts_input2 = gr.Dropdown(label="Speaker", choices=hps.speakers, type="index", value=hps.speakers[0])
+       tts_submit = gr.Button("Generate", variant="primary")
+       tts_output1 = gr.Textbox(label="Message")
+       tts_output2 = gr.Audio(label="Output")
+       tts_submit.click(tts, [tts_input1], [tts_output1, tts_output2])
+
+ app.launch()
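
app.py wraps the tts() handler in a Gradio Blocks UI. A hedged headless variant of the same inference path, reusing the scipy "write" that app.py imports but never calls: this assumes net_g, hps, and get_text are already in scope as constructed above (for example, pasted at the bottom of app.py in place of app.launch()); the output filename is illustrative.

import torch
from scipy.io.wavfile import write

# Same pipeline as tts(), but saved to disk instead of returned to Gradio.
stn_tst = get_text("こんにちは。", hps)
with torch.no_grad():
    x_tst = stn_tst.unsqueeze(0)                       # [1, T_text]
    x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
    audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667,
                        noise_scale_w=0.8, length_scale=2)[0][0, 0].data.float().numpy()
write("koni_sample.wav", hps.data.sampling_rate, audio)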
attentions.py ADDED
@@ -0,0 +1,303 @@
+ import copy
+ import math
+ import numpy as np
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+
+ import commons
+ import modules
+ from modules import LayerNorm
+
+
+ class Encoder(nn.Module):
+   def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
+     super().__init__()
+     self.hidden_channels = hidden_channels
+     self.filter_channels = filter_channels
+     self.n_heads = n_heads
+     self.n_layers = n_layers
+     self.kernel_size = kernel_size
+     self.p_dropout = p_dropout
+     self.window_size = window_size
+
+     self.drop = nn.Dropout(p_dropout)
+     self.attn_layers = nn.ModuleList()
+     self.norm_layers_1 = nn.ModuleList()
+     self.ffn_layers = nn.ModuleList()
+     self.norm_layers_2 = nn.ModuleList()
+     for i in range(self.n_layers):
+       self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
+       self.norm_layers_1.append(LayerNorm(hidden_channels))
+       self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
+       self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+   def forward(self, x, x_mask):
+     attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+     x = x * x_mask
+     for i in range(self.n_layers):
+       y = self.attn_layers[i](x, x, attn_mask)
+       y = self.drop(y)
+       x = self.norm_layers_1[i](x + y)
+
+       y = self.ffn_layers[i](x, x_mask)
+       y = self.drop(y)
+       x = self.norm_layers_2[i](x + y)
+     x = x * x_mask
+     return x
+
+
+ class Decoder(nn.Module):
+   def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
+     super().__init__()
+     self.hidden_channels = hidden_channels
+     self.filter_channels = filter_channels
+     self.n_heads = n_heads
+     self.n_layers = n_layers
+     self.kernel_size = kernel_size
+     self.p_dropout = p_dropout
+     self.proximal_bias = proximal_bias
+     self.proximal_init = proximal_init
+
+     self.drop = nn.Dropout(p_dropout)
+     self.self_attn_layers = nn.ModuleList()
+     self.norm_layers_0 = nn.ModuleList()
+     self.encdec_attn_layers = nn.ModuleList()
+     self.norm_layers_1 = nn.ModuleList()
+     self.ffn_layers = nn.ModuleList()
+     self.norm_layers_2 = nn.ModuleList()
+     for i in range(self.n_layers):
+       self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
+       self.norm_layers_0.append(LayerNorm(hidden_channels))
+       self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
+       self.norm_layers_1.append(LayerNorm(hidden_channels))
+       self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
+       self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+   def forward(self, x, x_mask, h, h_mask):
+     """
+     x: decoder input
+     h: encoder output
+     """
+     self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
+     encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+     x = x * x_mask
+     for i in range(self.n_layers):
+       y = self.self_attn_layers[i](x, x, self_attn_mask)
+       y = self.drop(y)
+       x = self.norm_layers_0[i](x + y)
+
+       y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
+       y = self.drop(y)
+       x = self.norm_layers_1[i](x + y)
+
+       y = self.ffn_layers[i](x, x_mask)
+       y = self.drop(y)
+       x = self.norm_layers_2[i](x + y)
+     x = x * x_mask
+     return x
+
+
+ class MultiHeadAttention(nn.Module):
+   def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
+     super().__init__()
+     assert channels % n_heads == 0
+
+     self.channels = channels
+     self.out_channels = out_channels
+     self.n_heads = n_heads
+     self.p_dropout = p_dropout
+     self.window_size = window_size
+     self.heads_share = heads_share
+     self.block_length = block_length
+     self.proximal_bias = proximal_bias
+     self.proximal_init = proximal_init
+     self.attn = None
+
+     self.k_channels = channels // n_heads
+     self.conv_q = nn.Conv1d(channels, channels, 1)
+     self.conv_k = nn.Conv1d(channels, channels, 1)
+     self.conv_v = nn.Conv1d(channels, channels, 1)
+     self.conv_o = nn.Conv1d(channels, out_channels, 1)
+     self.drop = nn.Dropout(p_dropout)
+
+     if window_size is not None:
+       n_heads_rel = 1 if heads_share else n_heads
+       rel_stddev = self.k_channels**-0.5
+       self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
+       self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
+
+     nn.init.xavier_uniform_(self.conv_q.weight)
+     nn.init.xavier_uniform_(self.conv_k.weight)
+     nn.init.xavier_uniform_(self.conv_v.weight)
+     if proximal_init:
+       with torch.no_grad():
+         self.conv_k.weight.copy_(self.conv_q.weight)
+         self.conv_k.bias.copy_(self.conv_q.bias)
+
+   def forward(self, x, c, attn_mask=None):
+     q = self.conv_q(x)
+     k = self.conv_k(c)
+     v = self.conv_v(c)
+
+     x, self.attn = self.attention(q, k, v, mask=attn_mask)
+
+     x = self.conv_o(x)
+     return x
+
+   def attention(self, query, key, value, mask=None):
+     # reshape [b, d, t] -> [b, n_h, t, d_k]
+     b, d, t_s, t_t = (*key.size(), query.size(2))
+     query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
+     key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+     value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+
+     scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
+     if self.window_size is not None:
+       assert t_s == t_t, "Relative attention is only available for self-attention."
+       key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
+       rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
+       scores_local = self._relative_position_to_absolute_position(rel_logits)
+       scores = scores + scores_local
+     if self.proximal_bias:
+       assert t_s == t_t, "Proximal bias is only available for self-attention."
+       scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
+     if mask is not None:
+       scores = scores.masked_fill(mask == 0, -1e4)
+       if self.block_length is not None:
+         assert t_s == t_t, "Local attention is only available for self-attention."
+         block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
+         scores = scores.masked_fill(block_mask == 0, -1e4)
+     p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
+     p_attn = self.drop(p_attn)
+     output = torch.matmul(p_attn, value)
+     if self.window_size is not None:
+       relative_weights = self._absolute_position_to_relative_position(p_attn)
+       value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
+       output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
+     output = output.transpose(2, 3).contiguous().view(b, d, t_t)  # [b, n_h, t_t, d_k] -> [b, d, t_t]
+     return output, p_attn
+
+   def _matmul_with_relative_values(self, x, y):
+     """
+     x: [b, h, l, m]
+     y: [h or 1, m, d]
+     ret: [b, h, l, d]
+     """
+     ret = torch.matmul(x, y.unsqueeze(0))
+     return ret
+
+   def _matmul_with_relative_keys(self, x, y):
+     """
+     x: [b, h, l, d]
+     y: [h or 1, m, d]
+     ret: [b, h, l, m]
+     """
+     ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
+     return ret
+
+   def _get_relative_embeddings(self, relative_embeddings, length):
+     max_relative_position = 2 * self.window_size + 1
+     # Pad first before slice to avoid using cond ops.
+     pad_length = max(length - (self.window_size + 1), 0)
+     slice_start_position = max((self.window_size + 1) - length, 0)
+     slice_end_position = slice_start_position + 2 * length - 1
+     if pad_length > 0:
+       padded_relative_embeddings = F.pad(
+           relative_embeddings,
+           commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
+     else:
+       padded_relative_embeddings = relative_embeddings
+     used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
+     return used_relative_embeddings
+
+   def _relative_position_to_absolute_position(self, x):
+     """
+     x: [b, h, l, 2*l-1]
+     ret: [b, h, l, l]
+     """
+     batch, heads, length, _ = x.size()
+     # Concat columns of pad to shift from relative to absolute indexing.
+     x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
+
+     # Concat extra elements so as to add up to shape (len+1, 2*len-1).
+     x_flat = x.view([batch, heads, length * 2 * length])
+     x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
+
+     # Reshape and slice out the padded elements.
+     x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
+     return x_final
+
+   def _absolute_position_to_relative_position(self, x):
+     """
+     x: [b, h, l, l]
+     ret: [b, h, l, 2*l-1]
+     """
+     batch, heads, length, _ = x.size()
+     # pad along column
+     x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
+     x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
+     # add 0's in the beginning that will skew the elements after reshape
+     x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
+     x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
+     return x_final
+
+   def _attention_bias_proximal(self, length):
+     """Bias for self-attention to encourage attention to close positions.
+     Args:
+       length: an integer scalar.
+     Returns:
+       a Tensor with shape [1, 1, length, length]
+     """
+     r = torch.arange(length, dtype=torch.float32)
+     diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
+     return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
+
+
+ class FFN(nn.Module):
+   def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
+     super().__init__()
+     self.in_channels = in_channels
+     self.out_channels = out_channels
+     self.filter_channels = filter_channels
+     self.kernel_size = kernel_size
+     self.p_dropout = p_dropout
+     self.activation = activation
+     self.causal = causal
+
+     if causal:
+       self.padding = self._causal_padding
+     else:
+       self.padding = self._same_padding
+
+     self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
+     self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
+     self.drop = nn.Dropout(p_dropout)
+
+   def forward(self, x, x_mask):
+     x = self.conv_1(self.padding(x * x_mask))
+     if self.activation == "gelu":
+       x = x * torch.sigmoid(1.702 * x)
+     else:
+       x = torch.relu(x)
+     x = self.drop(x)
+     x = self.conv_2(self.padding(x * x_mask))
+     return x * x_mask
+
+   def _causal_padding(self, x):
+     if self.kernel_size == 1:
+       return x
+     pad_l = self.kernel_size - 1
+     pad_r = 0
+     padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+     x = F.pad(x, commons.convert_pad_shape(padding))
+     return x
+
+   def _same_padding(self, x):
+     if self.kernel_size == 1:
+       return x
+     pad_l = (self.kernel_size - 1) // 2
+     pad_r = self.kernel_size // 2
+     padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+     x = F.pad(x, commons.convert_pad_shape(padding))
+     return x
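
A quick shape check of the Encoder above. The sizes mirror hidden_channels/filter_channels/n_heads in configs/japanese_base.json; the sketch only assumes this repo's modules are importable:

import torch
from attentions import Encoder

# Hedged smoke test: windowed relative-position self-attention keeps the
# [batch, hidden_channels, frames] layout end to end.
enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2,
              n_layers=6, kernel_size=3, p_dropout=0.1)
enc.eval()
x = torch.randn(1, 192, 50)    # [batch, channels, frames]
x_mask = torch.ones(1, 1, 50)  # every frame valid
with torch.no_grad():
    y = enc(x, x_mask)
print(y.shape)  # torch.Size([1, 192, 50])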
commons.py ADDED
@@ -0,0 +1,161 @@
+ import math
+ import numpy as np
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+
+
+ def init_weights(m, mean=0.0, std=0.01):
+   classname = m.__class__.__name__
+   if classname.find("Conv") != -1:
+     m.weight.data.normal_(mean, std)
+
+
+ def get_padding(kernel_size, dilation=1):
+   return int((kernel_size*dilation - dilation)/2)
+
+
+ def convert_pad_shape(pad_shape):
+   l = pad_shape[::-1]
+   pad_shape = [item for sublist in l for item in sublist]
+   return pad_shape
+
+
+ def intersperse(lst, item):
+   result = [item] * (len(lst) * 2 + 1)
+   result[1::2] = lst
+   return result
+
+
+ def kl_divergence(m_p, logs_p, m_q, logs_q):
+   """KL(P||Q)"""
+   kl = (logs_q - logs_p) - 0.5
+   kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
+   return kl
+
+
+ def rand_gumbel(shape):
+   """Sample from the Gumbel distribution, protect from overflows."""
+   uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
+   return -torch.log(-torch.log(uniform_samples))
+
+
+ def rand_gumbel_like(x):
+   g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
+   return g
+
+
+ def slice_segments(x, ids_str, segment_size=4):
+   ret = torch.zeros_like(x[:, :, :segment_size])
+   for i in range(x.size(0)):
+     idx_str = ids_str[i]
+     idx_end = idx_str + segment_size
+     ret[i] = x[i, :, idx_str:idx_end]
+   return ret
+
+
+ def rand_slice_segments(x, x_lengths=None, segment_size=4):
+   b, d, t = x.size()
+   if x_lengths is None:
+     x_lengths = t
+   ids_str_max = x_lengths - segment_size + 1
+   ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
+   ret = slice_segments(x, ids_str, segment_size)
+   return ret, ids_str
+
+
+ def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
+   position = torch.arange(length, dtype=torch.float)
+   num_timescales = channels // 2
+   log_timescale_increment = (
+       math.log(float(max_timescale) / float(min_timescale)) /
+       (num_timescales - 1))
+   inv_timescales = min_timescale * torch.exp(
+       torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
+   scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
+   signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
+   signal = F.pad(signal, [0, 0, 0, channels % 2])
+   signal = signal.view(1, channels, length)
+   return signal
+
+
+ def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
+   b, channels, length = x.size()
+   signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+   return x + signal.to(dtype=x.dtype, device=x.device)
+
+
+ def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
+   b, channels, length = x.size()
+   signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+   return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
+
+
+ def subsequent_mask(length):
+   mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
+   return mask
+
+
+ @torch.jit.script
+ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+   n_channels_int = n_channels[0]
+   in_act = input_a + input_b
+   t_act = torch.tanh(in_act[:, :n_channels_int, :])
+   s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+   acts = t_act * s_act
+   return acts
+
+
+ def convert_pad_shape(pad_shape):
+   l = pad_shape[::-1]
+   pad_shape = [item for sublist in l for item in sublist]
+   return pad_shape
+
+
+ def shift_1d(x):
+   x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
+   return x
+
+
+ def sequence_mask(length, max_length=None):
+   if max_length is None:
+     max_length = length.max()
+   x = torch.arange(max_length, dtype=length.dtype, device=length.device)
+   return x.unsqueeze(0) < length.unsqueeze(1)
+
+
+ def generate_path(duration, mask):
+   """
+   duration: [b, 1, t_x]
+   mask: [b, 1, t_y, t_x]
+   """
+   device = duration.device
+
+   b, _, t_y, t_x = mask.shape
+   cum_duration = torch.cumsum(duration, -1)
+
+   cum_duration_flat = cum_duration.view(b * t_x)
+   path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
+   path = path.view(b, t_x, t_y)
+   path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
+   path = path.unsqueeze(1).transpose(2, 3) * mask
+   return path
+
+
+ def clip_grad_value_(parameters, clip_value, norm_type=2):
+   if isinstance(parameters, torch.Tensor):
+     parameters = [parameters]
+   parameters = list(filter(lambda p: p.grad is not None, parameters))
+   norm_type = float(norm_type)
+   if clip_value is not None:
+     clip_value = float(clip_value)
+
+   total_norm = 0
+   for p in parameters:
+     param_norm = p.grad.data.norm(norm_type)
+     total_norm += param_norm.item() ** norm_type
+     if clip_value is not None:
+       p.grad.data.clamp_(min=-clip_value, max=clip_value)
+   total_norm = total_norm ** (1. / norm_type)
+   return total_norm
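
Two of these helpers do most of the quiet work elsewhere in the repo: intersperse implements the add_blank option from the configs, and convert_pad_shape turns a per-dimension [[before, after], ...] spec into the flat, last-dimension-first list that F.pad expects. A small worked example:

import torch
from torch.nn import functional as F
from commons import intersperse, convert_pad_shape

# add_blank=true: a blank token (id 0) goes between every symbol and at both ends.
print(intersperse([5, 3, 7], 0))  # [0, 5, 0, 3, 0, 7, 0]

# Per-dim pad spec -> flat list, innermost dimension first.
print(convert_pad_shape([[0, 0], [0, 0], [1, 0]]))  # [1, 0, 0, 0, 0, 0]
x = torch.ones(1, 2, 3)
print(F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]])).shape)  # torch.Size([1, 2, 4])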
configs/japanese_base.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "eval_interval": 2000,
+     "seed": 1234,
+     "epochs": 10000,
+     "learning_rate": 2e-4,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "batch_size": 24,
+     "fp16_run": true,
+     "lr_decay": 0.999875,
+     "segment_size": 8192,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0
+   },
+   "data": {
+     "training_files":"/mnt/vits_koni/filelists/koni_vocals_text_train_filelist.txt.cleaned",
+     "validation_files":"/mnt/vits_koni/filelists/koni_vocals_text_val_filelist.txt.cleaned",
+     "text_cleaners":["japanese_cleaners"],
+     "max_wav_value": 32768.0,
+     "sampling_rate": 22050,
+     "filter_length": 1024,
+     "hop_length": 256,
+     "win_length": 1024,
+     "n_mel_channels": 80,
+     "mel_fmin": 0.0,
+     "mel_fmax": null,
+     "add_blank": true,
+     "n_speakers": 7,
+     "cleaned_text": true
+   },
+   "model": {
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0.1,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3,7,11],
+     "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
+     "upsample_rates": [8,8,2,2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [16,16,4,4],
+     "n_layers_q": 3,
+     "use_spectral_norm": false,
+     "gin_channels": 256
+   },
+   "speakers": ["\u7dbe\u5730\u5be7\u3005", "\u56e0\u5e61\u3081\u3050\u308b", "\u671d\u6b66\u82b3\u4e43", "\u5e38\u9678\u8309\u5b50", "\u30e0\u30e9\u30b5\u30e1", "\u978d\u99ac\u5c0f\u6625", "\u5728\u539f\u4e03\u6d77"],
+   "symbols": ["_", ",", ".", "!", "?", "-", "A", "E", "I", "N", "O", "Q", "U", "a", "b", "d", "e", "f", "g", "h", "i", "j", "k", "m", "n", "o", "p", "r", "s", "t", "u", "v", "w", "y", "z", "\u0283", "\u02a7", "\u2193", "\u2191", " "]
+ }
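
A few invariants tie this config together: in a VITS/HiFi-GAN-style decoder the upsample_rates must multiply out to hop_length (8*8*2*2 = 256), so one spectrogram frame maps to exactly 256 waveform samples, and sampling_rate/hop_length fixes the frame rate. A hedged self-check, run from the repo root:

import json
import math

with open("configs/japanese_base.json") as f:
    cfg = json.load(f)

# The decoder's total upsampling must equal the STFT hop, or generated audio
# length will not match the spectrogram frame count.
assert math.prod(cfg["model"]["upsample_rates"]) == cfg["data"]["hop_length"]
# Roughly 86.1 spectrogram frames per second at 22050 Hz with hop 256.
print(cfg["data"]["sampling_rate"] / cfg["data"]["hop_length"])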
configs/japanese_base2.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "eval_interval": 1000,
+     "seed": 1234,
+     "epochs": 10000,
+     "learning_rate": 2e-4,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "batch_size": 32,
+     "fp16_run": true,
+     "lr_decay": 0.999875,
+     "segment_size": 8192,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0
+   },
+   "data": {
+     "training_files":"filelists/hamidashi_train_filelist.txt.cleaned",
+     "validation_files":"filelists/hamidashi_val_filelist.txt.cleaned",
+     "text_cleaners":["japanese_cleaners2"],
+     "max_wav_value": 32768.0,
+     "sampling_rate": 22050,
+     "filter_length": 1024,
+     "hop_length": 256,
+     "win_length": 1024,
+     "n_mel_channels": 80,
+     "mel_fmin": 0.0,
+     "mel_fmax": null,
+     "add_blank": true,
+     "n_speakers": 8,
+     "cleaned_text": true
+   },
+   "model": {
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0.1,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3,7,11],
+     "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
+     "upsample_rates": [8,8,2,2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [16,16,4,4],
+     "n_layers_q": 3,
+     "use_spectral_norm": false,
+     "gin_channels": 256
+   },
+   "speakers": ["\u548c\u6cc9\u5983\u611b", "\u5e38\u76e4\u83ef\u4e43", "\u9326\u3042\u3059\u307f", "\u938c\u5009\u8a69\u685c", "\u7adc\u9591\u5929\u68a8", "\u548c\u6cc9\u91cc", "\u65b0\u5ddd\u5e83\u5922", "\u8056\u8389\u3005\u5b50"],
+   "symbols": ["_", ",", ".", "!", "?", "-", "~", "\u2026", "A", "E", "I", "N", "O", "Q", "U", "a", "b", "d", "e", "f", "g", "h", "i", "j", "k", "m", "n", "o", "p", "r", "s", "t", "u", "v", "w", "y", "z", "\u0283", "\u02a7", "\u02a6", "\u2193", "\u2191", " "]
+ }
configs/japanese_ss_base2.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "eval_interval": 1000,
+     "seed": 1234,
+     "epochs": 20000,
+     "learning_rate": 2e-4,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "batch_size": 32,
+     "fp16_run": true,
+     "lr_decay": 0.999875,
+     "segment_size": 8192,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0
+   },
+   "data": {
+     "training_files":"filelists/train_filelist.txt.cleaned",
+     "validation_files":"filelists/val_filelist.txt.cleaned",
+     "text_cleaners":["japanese_cleaners2"],
+     "max_wav_value": 32768.0,
+     "sampling_rate": 22050,
+     "filter_length": 1024,
+     "hop_length": 256,
+     "win_length": 1024,
+     "n_mel_channels": 80,
+     "mel_fmin": 0.0,
+     "mel_fmax": null,
+     "add_blank": true,
+     "n_speakers": 0,
+     "cleaned_text": true
+   },
+   "model": {
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0.1,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3,7,11],
+     "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
+     "upsample_rates": [8,8,2,2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [16,16,4,4],
+     "n_layers_q": 3,
+     "use_spectral_norm": false
+   },
+   "speakers": ["\u30eb\u30a4\u30ba"],
+   "symbols": ["_", ",", ".", "!", "?", "-", "~", "\u2026", "A", "E", "I", "N", "O", "Q", "U", "a", "b", "d", "e", "f", "g", "h", "i", "j", "k", "m", "n", "o", "p", "r", "s", "t", "u", "v", "w", "y", "z", "\u0283", "\u02a7", "\u02a6", "\u2193", "\u2191", " "]
+ }
data_utils.py ADDED
@@ -0,0 +1,393 @@
+ import time
+ import os
+ import random
+ import numpy as np
+ import torch
+ import torch.utils.data
+
+ import commons
+ from mel_processing import spectrogram_torch
+ from utils import load_wav_to_torch, load_filepaths_and_text
+ from text import text_to_sequence, cleaned_text_to_sequence
+
+
+ class TextAudioLoader(torch.utils.data.Dataset):
+   """
+   1) loads audio, text pairs
+   2) normalizes text and converts it to sequences of integers
+   3) computes spectrograms from audio files.
+   """
+   def __init__(self, audiopaths_and_text, hparams):
+     self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
+     self.text_cleaners = hparams.text_cleaners
+     self.max_wav_value = hparams.max_wav_value
+     self.sampling_rate = hparams.sampling_rate
+     self.filter_length = hparams.filter_length
+     self.hop_length = hparams.hop_length
+     self.win_length = hparams.win_length
+     self.sampling_rate = hparams.sampling_rate
+
+     self.cleaned_text = getattr(hparams, "cleaned_text", False)
+
+     self.add_blank = hparams.add_blank
+     self.min_text_len = getattr(hparams, "min_text_len", 1)
+     self.max_text_len = getattr(hparams, "max_text_len", 190)
+
+     random.seed(1234)
+     random.shuffle(self.audiopaths_and_text)
+     self._filter()
+
+   def _filter(self):
+     """
+     Filter text & store spec lengths
+     """
+     # Store spectrogram lengths for Bucketing
+     # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
+     # spec_length = wav_length // hop_length
+
+     audiopaths_and_text_new = []
+     lengths = []
+     for audiopath, text in self.audiopaths_and_text:
+       audiopath = '/mnt/vits_koni/' + audiopath  # heh, couldn't think of a better way, so the path is just hard-coded here
+       if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
+         audiopaths_and_text_new.append([audiopath, text])
+         lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
+     self.audiopaths_and_text = audiopaths_and_text_new
+     self.lengths = lengths
+
+   def get_audio_text_pair(self, audiopath_and_text):
+     # separate filename and text
+     audiopath, text = audiopath_and_text[0], audiopath_and_text[1]
+     text = self.get_text(text)
+     spec, wav = self.get_audio(audiopath)
+     return (text, spec, wav)
+
+   def get_audio(self, filename):
+     audio, sampling_rate = load_wav_to_torch(filename)
+     if sampling_rate != self.sampling_rate:
+       raise ValueError("{} SR doesn't match target {} SR".format(
+         sampling_rate, self.sampling_rate))
+     audio_norm = audio / self.max_wav_value
+     audio_norm = audio_norm.unsqueeze(0)
+     spec_filename = filename.replace(".wav", ".spec.pt")
+     if os.path.exists(spec_filename):
+       spec = torch.load(spec_filename)
+     else:
+       spec = spectrogram_torch(audio_norm, self.filter_length,
+         self.sampling_rate, self.hop_length, self.win_length,
+         center=False)
+       spec = torch.squeeze(spec, 0)
+       torch.save(spec, spec_filename)
+     return spec, audio_norm
+
+   def get_text(self, text):
+     if self.cleaned_text:
+       text_norm = cleaned_text_to_sequence(text)
+     else:
+       text_norm = text_to_sequence(text, self.text_cleaners)
+     if self.add_blank:
+       text_norm = commons.intersperse(text_norm, 0)
+     text_norm = torch.LongTensor(text_norm)
+     return text_norm
+
+   def __getitem__(self, index):
+     return self.get_audio_text_pair(self.audiopaths_and_text[index])
+
+   def __len__(self):
+     return len(self.audiopaths_and_text)
+
+
+ class TextAudioCollate():
+   """ Zero-pads model inputs and targets
+   """
+   def __init__(self, return_ids=False):
+     self.return_ids = return_ids
+
+   def __call__(self, batch):
+     """Collates a training batch from normalized text and audio
+     PARAMS
+     ------
+     batch: [text_normalized, spec_normalized, wav_normalized]
+     """
+     # Right zero-pad all one-hot text sequences to max input length
+     _, ids_sorted_decreasing = torch.sort(
+       torch.LongTensor([x[1].size(1) for x in batch]),
+       dim=0, descending=True)
+
+     max_text_len = max([len(x[0]) for x in batch])
+     max_spec_len = max([x[1].size(1) for x in batch])
+     max_wav_len = max([x[2].size(1) for x in batch])
+
+     text_lengths = torch.LongTensor(len(batch))
+     spec_lengths = torch.LongTensor(len(batch))
+     wav_lengths = torch.LongTensor(len(batch))
+
+     text_padded = torch.LongTensor(len(batch), max_text_len)
+     spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
+     wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
+     text_padded.zero_()
+     spec_padded.zero_()
+     wav_padded.zero_()
+     for i in range(len(ids_sorted_decreasing)):
+       row = batch[ids_sorted_decreasing[i]]
+
+       text = row[0]
+       text_padded[i, :text.size(0)] = text
+       text_lengths[i] = text.size(0)
+
+       spec = row[1]
+       spec_padded[i, :, :spec.size(1)] = spec
+       spec_lengths[i] = spec.size(1)
+
+       wav = row[2]
+       wav_padded[i, :, :wav.size(1)] = wav
+       wav_lengths[i] = wav.size(1)
+
+     if self.return_ids:
+       return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, ids_sorted_decreasing
+     return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths
+
+
+ """Multi speaker version"""
+ class TextAudioSpeakerLoader(torch.utils.data.Dataset):
+   """
+   1) loads audio, speaker_id, text pairs
+   2) normalizes text and converts it to sequences of integers
+   3) computes spectrograms from audio files.
+   """
+   def __init__(self, audiopaths_sid_text, hparams):
+     self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
+     self.text_cleaners = hparams.text_cleaners
+     self.max_wav_value = hparams.max_wav_value
+     self.sampling_rate = hparams.sampling_rate
+     self.filter_length = hparams.filter_length
+     self.hop_length = hparams.hop_length
+     self.win_length = hparams.win_length
+     self.sampling_rate = hparams.sampling_rate
+
+     self.cleaned_text = getattr(hparams, "cleaned_text", False)
+
+     self.add_blank = hparams.add_blank
+     self.min_text_len = getattr(hparams, "min_text_len", 1)
+     self.max_text_len = getattr(hparams, "max_text_len", 190)
+
+     random.seed(1234)
+     random.shuffle(self.audiopaths_sid_text)
+     self._filter()
+
+   def _filter(self):
+     """
+     Filter text & store spec lengths
+     """
+     # Store spectrogram lengths for Bucketing
+     # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
+     # spec_length = wav_length // hop_length
+
+     audiopaths_sid_text_new = []
+     lengths = []
+     for audiopath, sid, text in self.audiopaths_sid_text:
+       if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
+         audiopaths_sid_text_new.append([audiopath, sid, text])
+         lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
+     self.audiopaths_sid_text = audiopaths_sid_text_new
+     self.lengths = lengths
+
+   def get_audio_text_speaker_pair(self, audiopath_sid_text):
+     # separate filename, speaker_id and text
+     audiopath, sid, text = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2]
+     text = self.get_text(text)
+     spec, wav = self.get_audio(audiopath)
+     sid = self.get_sid(sid)
+     return (text, spec, wav, sid)
+
+   def get_audio(self, filename):
+     audio, sampling_rate = load_wav_to_torch(filename)
+     if sampling_rate != self.sampling_rate:
+       raise ValueError("{} SR doesn't match target {} SR".format(
+         sampling_rate, self.sampling_rate))
+     audio_norm = audio / self.max_wav_value
+     audio_norm = audio_norm.unsqueeze(0)
+     spec_filename = filename.replace(".wav", ".spec.pt")
+     if os.path.exists(spec_filename):
+       spec = torch.load(spec_filename)
+     else:
+       spec = spectrogram_torch(audio_norm, self.filter_length,
+         self.sampling_rate, self.hop_length, self.win_length,
+         center=False)
+       spec = torch.squeeze(spec, 0)
+       torch.save(spec, spec_filename)
+     return spec, audio_norm
+
+   def get_text(self, text):
+     if self.cleaned_text:
+       text_norm = cleaned_text_to_sequence(text)
+     else:
+       text_norm = text_to_sequence(text, self.text_cleaners)
+     if self.add_blank:
+       text_norm = commons.intersperse(text_norm, 0)
+     text_norm = torch.LongTensor(text_norm)
+     return text_norm
+
+   def get_sid(self, sid):
+     sid = torch.LongTensor([int(sid)])
+     return sid
+
+   def __getitem__(self, index):
+     return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
+
+   def __len__(self):
+     return len(self.audiopaths_sid_text)
+
+
+ class TextAudioSpeakerCollate():
+   """ Zero-pads model inputs and targets
+   """
+   def __init__(self, return_ids=False):
+     self.return_ids = return_ids
+
+   def __call__(self, batch):
+     """Collates a training batch from normalized text, audio and speaker identities
+     PARAMS
+     ------
+     batch: [text_normalized, spec_normalized, wav_normalized, sid]
+     """
+     # Right zero-pad all one-hot text sequences to max input length
+     _, ids_sorted_decreasing = torch.sort(
+       torch.LongTensor([x[1].size(1) for x in batch]),
+       dim=0, descending=True)
+
+     max_text_len = max([len(x[0]) for x in batch])
+     max_spec_len = max([x[1].size(1) for x in batch])
+     max_wav_len = max([x[2].size(1) for x in batch])
+
+     text_lengths = torch.LongTensor(len(batch))
+     spec_lengths = torch.LongTensor(len(batch))
+     wav_lengths = torch.LongTensor(len(batch))
+     sid = torch.LongTensor(len(batch))
+
+     text_padded = torch.LongTensor(len(batch), max_text_len)
+     spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
+     wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
+     text_padded.zero_()
+     spec_padded.zero_()
+     wav_padded.zero_()
+     for i in range(len(ids_sorted_decreasing)):
+       row = batch[ids_sorted_decreasing[i]]
+
+       text = row[0]
+       text_padded[i, :text.size(0)] = text
+       text_lengths[i] = text.size(0)
+
+       spec = row[1]
+       spec_padded[i, :, :spec.size(1)] = spec
+       spec_lengths[i] = spec.size(1)
+
+       wav = row[2]
+       wav_padded[i, :, :wav.size(1)] = wav
+       wav_lengths[i] = wav.size(1)
+
+       sid[i] = row[3]
+
+     if self.return_ids:
+       return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, ids_sorted_decreasing
+     return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid
+
+
+ class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
+   """
+   Maintain similar input lengths in a batch.
+   Length groups are specified by boundaries.
+   Ex) boundaries = [b1, b2, b3] -> any batch is included in either {x | b1 < length(x) <= b2} or {x | b2 < length(x) <= b3}.
+
+   It removes samples which are not included in the boundaries.
+   Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 is discarded.
+   """
+   def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):
+     super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
+     self.lengths = dataset.lengths
+     self.batch_size = batch_size
+     self.boundaries = boundaries
+
+     self.buckets, self.num_samples_per_bucket = self._create_buckets()
+     self.total_size = sum(self.num_samples_per_bucket)
+     self.num_samples = self.total_size // self.num_replicas
+
+   def _create_buckets(self):
+     buckets = [[] for _ in range(len(self.boundaries) - 1)]
+     for i in range(len(self.lengths)):
+       length = self.lengths[i]
+       idx_bucket = self._bisect(length)
+       if idx_bucket != -1:
+         buckets[idx_bucket].append(i)
+
+     for i in range(len(buckets) - 1, 0, -1):
+       if len(buckets[i]) == 0:
+         buckets.pop(i)
+         self.boundaries.pop(i+1)
+
+     num_samples_per_bucket = []
+     for i in range(len(buckets)):
+       len_bucket = len(buckets[i])
+       total_batch_size = self.num_replicas * self.batch_size
+       rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size
+       num_samples_per_bucket.append(len_bucket + rem)
+     return buckets, num_samples_per_bucket
+
+   def __iter__(self):
+     # deterministically shuffle based on epoch
+     g = torch.Generator()
+     g.manual_seed(self.epoch)
+
+     indices = []
+     if self.shuffle:
+       for bucket in self.buckets:
+         indices.append(torch.randperm(len(bucket), generator=g).tolist())
+     else:
+       for bucket in self.buckets:
+         indices.append(list(range(len(bucket))))
+
+     batches = []
+     for i in range(len(self.buckets)):
+       bucket = self.buckets[i]
+       len_bucket = len(bucket)
+       ids_bucket = indices[i]
+       num_samples_bucket = self.num_samples_per_bucket[i]
+
+       # add extra samples to make it evenly divisible
+       rem = num_samples_bucket - len_bucket
+       ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]
+
+       # subsample
+       ids_bucket = ids_bucket[self.rank::self.num_replicas]
+
+       # batching
+       for j in range(len(ids_bucket) // self.batch_size):
+         batch = [bucket[idx] for idx in ids_bucket[j*self.batch_size:(j+1)*self.batch_size]]
+         batches.append(batch)
+
+     if self.shuffle:
+       batch_ids = torch.randperm(len(batches), generator=g).tolist()
+       batches = [batches[i] for i in batch_ids]
+     self.batches = batches
+
+     assert len(self.batches) * self.batch_size == self.num_samples
+     return iter(self.batches)
+
+   def _bisect(self, x, lo=0, hi=None):
+     if hi is None:
+       hi = len(self.boundaries) - 1
+
+     if hi > lo:
+       mid = (hi + lo) // 2
+       if self.boundaries[mid] < x and x <= self.boundaries[mid+1]:
+         return mid
+       elif x <= self.boundaries[mid]:
+         return self._bisect(x, lo, mid)
+       else:
+         return self._bisect(x, mid + 1, hi)
+     else:
+       return -1
+
+   def __len__(self):
+     return self.num_samples // self.batch_size
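
_filter estimates spectrogram length straight from the wav's byte size (16-bit mono PCM is 2 bytes per sample, so frames ~= file_size / (2 * hop_length)), and those lengths drive DistributedBucketSampler. A hedged single-process wiring of these classes; the boundaries values below are illustrative spec-frame buckets, not taken from this commit, and the config's data paths are machine-specific (/mnt/vits_koni):

from torch.utils.data import DataLoader

import utils
from data_utils import TextAudioLoader, TextAudioCollate, DistributedBucketSampler

hps = utils.get_hparams_from_file("configs/japanese_base.json")
train_dataset = TextAudioLoader(hps.data.training_files, hps.data)
sampler = DistributedBucketSampler(
    train_dataset, hps.train.batch_size,
    boundaries=[32, 300, 400, 500, 600, 700, 800, 900, 1000],  # illustrative
    num_replicas=1, rank=0, shuffle=True)  # single process, no DDP needed
loader = DataLoader(train_dataset, batch_sampler=sampler,
                    collate_fn=TextAudioCollate(), num_workers=2)
sampler.set_epoch(0)  # reshuffles deterministically per epoch
text, text_len, spec, spec_len, wav, wav_len = next(iter(loader))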
filelists/koni_vocals_text_train_filelist.txt ADDED
The diff for this file is too large to render. See raw diff
 
filelists/koni_vocals_text_train_filelist.txt.cleaned ADDED
The diff for this file is too large to render. See raw diff
 
filelists/koni_vocals_text_val_filelist.txt ADDED
@@ -0,0 +1,347 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ wav/koni_vocals_08_26_03_401.wav|もしかしたら
2
+ wav/koni_vocals_08_26_03_402.wav|女の子の方が好きなのかな
3
+ wav/koni_vocals_08_26_03_404.wav|出来上がっとこ邪魔健康にいて埋め込めんだスープライン沿い目立つ服装を
4
+ wav/koni_vocals_08_26_03_405.wav|メリットは苦笑
5
+ wav/koni_vocals_08_26_03_407.wav|愛が良くしていくやぎさん日継続チャメが特捜て待ってるだけるってこと
6
+ wav/koni_vocals_08_26_03_408.wav|街で目立ちたいになったらゴミ袋とかかぶってあの全身スパンコールの服とか着るといいよ
7
+ wav/koni_vocals_08_26_03_409.wav|頭に高名所さんみたいな感じで
8
+ wav/koni_vocals_08_26_03_410.wav|ゴミ袋をかぶって変身スターコーリャンお尻だけ穴開けてダストけどメタメタめがけると思う
9
+ wav/koni_vocals_08_26_03_411.wav|娘達人女性この服
10
+ wav/koni_vocals_08_26_03_412.wav|結構ここには好きだけどね女んかいとかなもしかしたら
11
+ wav/koni_vocals_08_26_03_414.wav|ありがとうございまいた
12
+ wav/koni_vocals_08_26_03_415.wav|比較e北京家に呼んだ
13
+ wav/koni_vocals_08_26_03_417.wav|これはどこなのいや
14
+ wav/koni_vocals_08_26_03_418.wav|これ以上の普通車一台ピープルじゃない
15
+ wav/koni_vocals_08_26_03_419.wav|もうなんかとどの中のシンプルを極めたみたいなこやんこれ
16
+ wav/koni_vocals_08_26_03_421.wav|雑魚映画ええと
17
+ wav/koni_vocals_08_26_03_425.wav|犯人ですかね
18
+ wav/koni_vocals_08_26_03_426.wav|ご本人ですか猫多頭もんね
19
+ wav/koni_vocals_08_26_03_428.wav|各いいですねうん
20
+ wav/koni_vocals_08_26_03_430.wav|見て応援します
21
+ wav/koni_vocals_08_26_03_431.wav|上品に危険いっぱい
22
+ wav/koni_vocals_08_26_03_437.wav|ああなん踏み出さ
23
+ wav/koni_vocals_08_26_03_439.wav|案外とこのパン
24
+ wav/koni_vocals_08_26_03_440.wav|ちょっとドキッとと叩いてあったことことと受けて
25
+ wav/koni_vocals_08_26_03_441.wav|パイロットって
26
+ wav/koni_vocals_08_26_03_442.wav|やっとうん母今日は
27
+ wav/koni_vocals_08_26_03_443.wav|なんかこの僕は医学お父さんっぽい
28
+ wav/koni_vocals_08_26_03_444.wav|お蝶さんとかが
29
+ wav/koni_vocals_08_26_03_445.wav|休日に着ている服っぽい
30
+ wav/koni_vocals_08_26_03_446.wav|めちゃくちゃ進歩だうん
31
+ wav/koni_vocals_08_26_03_447.wav|めっちゃシンプルだね
32
+ wav/koni_vocals_08_26_03_449.wav|人たちもなんか
33
+ wav/koni_vocals_08_26_03_450.wav|観光というか遊びに来てるっぽいけど
34
+ wav/koni_vocals_08_26_03_451.wav|ここはどういうどういった場所なんだろう
35
+ wav/koni_vocals_08_26_03_452.wav|なんていうとこなんで
36
+ wav/koni_vocals_08_26_03_453.wav|これは本に書いてるこ
37
+ wav/koni_vocals_08_26_03_455.wav|カタカナのコディ容量
38
+ wav/koni_vocals_08_26_03_456.wav|テーマパークっぽいよね何回の
39
+ wav/koni_vocals_08_26_03_458.wav|いいとはおじさんぽいですか美味しさ
40
+ wav/koni_vocals_08_26_03_459.wav|ぽいっていうか
41
+ wav/koni_vocals_08_26_03_460.wav|言ってもヒッポるじゃない男方小学生から
42
+ wav/koni_vocals_08_26_03_461.wav|美味しいちゃんまりキレる最もシンプルな服みたいな感じ
43
+ wav/koni_vocals_08_26_03_462.wav|最もシンプルなくって感じ
44
+ wav/koni_vocals_08_26_03_464.wav|結構いいと思う逆に
45
+ wav/koni_vocals_08_26_03_466.wav|誰にも外れないみたいな
46
+ wav/koni_vocals_08_26_03_469.wav|かなりいいよいや
47
+ wav/koni_vocals_08_26_03_470.wav|ありがとうござんましたヒット御前試合外特に理解度ティーン
48
+ wav/koni_vocals_08_26_03_471.wav|これで朴葉枝人がリラクでは永代この季節でちょっと待て
49
+ wav/koni_vocals_08_26_03_472.wav|この後続け文字やメロン
50
+ wav/koni_vocals_08_26_03_473.wav|みんな本当にこの絵文字が好きだなこの歯を出して笑っているを
51
+ wav/koni_vocals_08_26_03_474.wav|この絵文字付きやら
52
+ wav/koni_vocals_08_26_03_476.wav|これは名前がないのかな
53
+ wav/koni_vocals_08_26_03_477.wav|名前が無いっぽいタイトルと名前がこれ
54
+ wav/koni_vocals_08_26_03_479.wav|これ何歩足開いこれうん
55
+ wav/koni_vocals_08_26_03_480.wav|金ロイ体位的な言い過ぎたよ
56
+ wav/koni_vocals_08_26_03_481.wav|これ文のみでなんと
57
+ wav/koni_vocals_08_26_03_482.wav|綾波でって眼鏡かけてた眼鏡元出かけてたっけ
58
+ wav/koni_vocals_08_26_03_483.wav|右利きに年貢それはそんなごじゃないここには二次元のていたき来てる人がいいなあと思いますよ生い立ちどんどん引け後いいじゃないですか
59
+ wav/koni_vocals_08_26_03_488.wav|もし付き合っている人がいて
60
+ wav/koni_vocals_08_26_03_489.wav|その人がずっとこう
61
+ wav/koni_vocals_08_26_03_490.wav|二次元の人ばかりを着ていたら
62
+ wav/koni_vocals_08_26_03_491.wav|皆どこから大したでこにはその人のことをぶっ飛ばすかも知れないけど
63
+ wav/koni_vocals_08_26_03_492.wav|まあゲーム猫にの
64
+ wav/koni_vocals_08_26_03_493.wav|国の方をあの褒めてくれるんだったらこの
65
+ wav/koni_vocals_08_26_03_494.wav|計画を着てくれるんだからまあいいけどねいっそ一緒こにだけの痛tを着てくれるんだたいけど
66
+ wav/koni_vocals_08_26_03_495.wav|クラギー太弾いてすべき安倍と遅れますとジューシーファン感に
67
+ wav/koni_vocals_08_26_03_498.wav|褒められないのに無理やり褒めてるのいい
68
+ wav/koni_vocals_08_26_03_500.wav|やって来てはいますっていうことないわ恋に気持ち悪いって言ってください良くて直江津に来
69
+ wav/koni_vocals_08_26_03_502.wav|気負って歩くはないだろう
70
+ wav/koni_vocals_08_26_03_504.wav|fスープ市は意外とやって待って待って待って待って
71
+ wav/koni_vocals_08_26_03_505.wav|だってこんななんかさ
72
+ wav/koni_vocals_08_26_03_506.wav|今夜無試験を見てるみたいに色んなそういったことないわむりやを見てないは恋に
73
+ wav/koni_vocals_08_26_03_507.wav|ちゃんとそのその洋服の中の良いところ褒めてますから
74
+ wav/koni_vocals_08_26_03_508.wav|なんかこの家嫌いとかそうじゃなイカれた国
75
+ wav/koni_vocals_08_26_04_0.wav|この低何キモすぎとか思わないから
76
+ wav/koni_vocals_08_26_04_1.wav|これはリアルリアルリアルな反応です
77
+ wav/koni_vocals_08_26_04_7.wav|このっくいとないけどね
78
+ wav/koni_vocals_08_26_04_8.wav|一気にに検討
79
+ wav/koni_vocals_08_26_04_9.wav|正確に国の二時間を許すガウンに利権を許す的みんな二次元軍を辞め
80
+ wav/koni_vocals_08_26_04_10.wav|ときにじちゃんは許せば
81
+ wav/koni_vocals_08_26_04_14.wav|いいと思うけどな
82
+ wav/koni_vocals_08_26_04_17.wav|このちょうどね
83
+ wav/koni_vocals_08_26_04_18.wav|ちょうど新品のとこらへんが
84
+ wav/koni_vocals_08_26_04_19.wav|キランと光っているのもいいと思います
85
+ wav/koni_vocals_08_26_04_20.wav|ここの国の為にとポイントはこういうちょっと
86
+ wav/koni_vocals_08_26_04_21.wav|ちょっと危うい場所に近づくに
87
+ wav/koni_vocals_08_26_04_22.wav|連れてこうキラキラと光っている小屋もう素晴らしいaをよくいと思いますよこんなお洋服を
88
+ wav/koni_vocals_08_26_04_24.wav|こんなをよく聞いたいですかねこのきらきらとです
89
+ wav/koni_vocals_08_26_04_25.wav|股間が行き交っている
90
+ wav/koni_vocals_08_26_04_26.wav|日本の女みんなそうなんですか
91
+ wav/koni_vocals_08_26_04_28.wav|日本の女の子みんなそうっていうのは
92
+ wav/koni_vocals_08_26_04_29.wav|どういうことを日本の女の子めちゃくちゃ悪口言うと思うよ
93
+ wav/koni_vocals_08_26_04_30.wav|あいつのたらいいか付き無けない夢気分よねって言って全然いよと思う大丈夫
94
+ wav/koni_vocals_08_26_04_31.wav|全然悪口いるから安心してそんなにみんなちゃんと終わる来ちゃったよから安心して大丈夫
95
+ wav/koni_vocals_08_26_04_32.wav|次行きましょう
96
+ wav/koni_vocals_08_26_04_34.wav|お塩が愛読んうん
97
+ wav/koni_vocals_08_26_04_35.wav|内陸エコみたいな空間弥運内の
98
+ wav/koni_vocals_08_26_04_36.wav|こういった愛読
99
+ wav/koni_vocals_08_26_04_42.wav|これも従兄弟そんなことないよね音だと言って
100
+ wav/koni_vocals_08_26_04_43.wav|これこれはちゃんと女性だと言ってお願いお願いお願い
101
+ wav/koni_vocals_08_26_04_44.wav|これはアドフィードよと言ってくれやめて
102
+ wav/koni_vocals_08_26_04_45.wav|もうやめた某国でこれも従兄弟ですよとかユダヤなきゃ鬼高爽子ににこれはちゃんとした女の子と用途誰か言ってくれ頼む
103
+ wav/koni_vocals_08_26_04_46.wav|こんなに平や平ネタ切れた
104
+ wav/koni_vocals_08_26_04_48.wav|足押し入ってきれいじゃない
105
+ wav/koni_vocals_08_26_04_49.wav|惜しいフラウとしててきれいな気がするんだけど
106
+ wav/koni_vocals_08_26_04_51.wav|百パーと声が
107
+ wav/koni_vocals_08_26_04_53.wav|これ食べていないと本当に
108
+ wav/koni_vocals_08_26_04_54.wav|はいで的ちょっと
109
+ wav/koni_vocals_08_26_04_55.wav|ここにいちゃんをちょっと
110
+ wav/koni_vocals_08_26_04_56.wav|技術力もいいとこ着ていない
111
+ wav/koni_vocals_08_26_04_58.wav|これ冗談の台詞の
112
+ wav/koni_vocals_08_26_04_60.wav|可愛いんだけど
113
+ wav/koni_vocals_08_26_04_62.wav|なんか制服っぽいけど
114
+ wav/koni_vocals_08_26_04_63.wav|中国の制服を
115
+ wav/koni_vocals_08_26_04_64.wav|中国を中国の学校の制服っぽい感じ
116
+ wav/koni_vocals_08_26_04_65.wav|そういったもう日本のアニメかなんかの制服かなあんまり金日本だと普通ではない政府この感じだよね
117
+ wav/koni_vocals_08_26_04_66.wav|めっちゃ可愛くない
118
+ wav/koni_vocals_08_26_04_67.wav|idが狙いなのでこれ
119
+ wav/koni_vocals_08_26_04_68.wav|idないんですよ
120
+ wav/koni_vocals_08_26_04_69.wav|この前がなく見
121
+ wav/koni_vocals_08_26_04_71.wav|灼眼のシャナのことじゃあそうなんだ
122
+ wav/koni_vocals_08_26_04_72.wav|やっぱこうしてなった
123
+ wav/koni_vocals_08_26_04_73.wav|こうしてっぽい可愛
124
+ wav/koni_vocals_08_26_04_75.wav|これは可愛いね中国にもない日本にも年の日本でもあんまりこういうセコ
125
+ wav/koni_vocals_08_26_04_76.wav|の行きがけ利用なんか
126
+ wav/koni_vocals_08_26_04_77.wav|そもそも日本ってもっとこうなんかシンプルな色な気がするから
127
+ wav/koni_vocals_08_26_04_78.wav|いいねこんな緑色にさんゴールドのリボンでしょうこんなけ食ったらめっちゃ可愛くない
128
+ wav/koni_vocals_08_26_04_79.wav|ここにこんな制服あったらこんなこところ行きたかったが
129
+ wav/koni_vocals_08_26_04_80.wav|学校側医者の知らないの
130
+ wav/koni_vocals_08_26_04_83.wav|会社の車が知らない
131
+ wav/koni_vocals_08_26_04_84.wav|勢いづいよ可愛い猫でありがとう
132
+ wav/koni_vocals_08_26_04_88.wav|女として女と会ってる友達とかいない彼
133
+ wav/koni_vocals_08_26_04_89.wav|ちょっと会ってみたいな生で見てみたい女装している人に
134
+ wav/koni_vocals_08_26_04_91.wav|やはりるところが多すぎてトロトロした後で終わったらくり一人でペロペロ
135
+ wav/koni_vocals_08_26_04_93.wav|これでとりあえず一個面終わりだ
136
+ wav/koni_vocals_08_26_04_94.wav|第二段階に第二個目に行く前にちょっとみんなにニコニの
137
+ wav/koni_vocals_08_26_04_95.wav|個人のやっても見ても夜を一緒
138
+ wav/koni_vocals_08_26_04_96.wav|でもてこられて
139
+ wav/koni_vocals_08_26_04_100.wav|ボインボインですね
140
+ wav/koni_vocals_08_26_04_101.wav|かわいいですよこれは
141
+ wav/koni_vocals_08_26_04_103.wav|浮気巨になったということですね
142
+ wav/koni_vocals_08_26_04_105.wav|かわいいよね
143
+ wav/koni_vocals_08_26_04_106.wav|フータをぽいよねほた覚えキョンシーかな
144
+ wav/koni_vocals_08_26_04_107.wav|今日日にぽいよね可愛いですこれは
145
+ wav/koni_vocals_08_26_04_109.wav|へ行く国はい
146
+ wav/koni_vocals_08_26_04_110.wav|ここにではない通常猫に
147
+ wav/koni_vocals_08_26_04_112.wav|別にみんなさんがわけではない子には
148
+ wav/koni_vocals_08_26_04_114.wav|ただそういう
149
+ wav/koni_vocals_08_26_04_115.wav|そういう訳お借りしたんだと採用
150
+ wav/koni_vocals_08_26_04_116.wav|講義のスリーdだみたい
151
+ wav/koni_vocals_08_26_04_117.wav|そんなものない
152
+ wav/koni_vocals_08_26_04_118.wav|ここに載せリーリーなんてものはない
153
+ wav/koni_vocals_08_26_04_119.wav|待ってじゃこ飯が
154
+ wav/koni_vocals_08_26_04_122.wav|着てそうなやつを見せてやろう
155
+ wav/koni_vocals_08_26_04_123.wav|ちょっと待って女の子って調べるとマジで制服の位置だとか言ってくる
156
+ wav/koni_vocals_08_26_04_124.wav|結局月ばっか出てこれだけど
157
+ wav/koni_vocals_08_26_04_130.wav|ええとニャン待ってね
158
+ wav/koni_vocals_08_26_04_131.wav|ちょっと待って今探してるから
159
+ wav/koni_vocals_08_26_04_132.wav|めっちゃくちゃ日本人っぽいと思いでしょう
160
+ wav/koni_vocals_08_26_04_133.wav|三つ日本人ぽいですね駅終わりそうな客席床に寝
161
+ wav/koni_vocals_08_26_04_146.wav|三次元で借りてる家やたわい
162
+ wav/koni_vocals_08_26_04_147.wav|だいたいもこんな感じの服着てますね
163
+ wav/koni_vocals_08_26_04_149.wav|私はこのように
164
+ wav/koni_vocals_08_26_04_150.wav|日本人ポン屋根に言われると思った日本人ぽいって思われてたろうなと思った
165
+ wav/koni_vocals_08_26_04_153.wav|新田な検証が着
166
+ wav/koni_vocals_08_26_04_157.wav|こういうの号車れないよね
167
+ wav/koni_vocals_08_26_04_158.wav|不思議なことに
168
+ wav/koni_vocals_08_26_04_163.wav|天然娘に自分の三次元の
169
+ wav/koni_vocals_08_26_04_164.wav|箱を見せろなんて思ってなかったから
170
+ wav/koni_vocals_08_26_04_165.wav|天然用意してないけどな
171
+ wav/koni_vocals_08_26_04_169.wav|結構あんまりね
172
+ wav/koni_vocals_08_26_04_170.wav|家とは決まってないんだけど
173
+ wav/koni_vocals_08_26_04_171.wav|これで今日パキッ次です
174
+ wav/koni_vocals_08_26_04_172.wav|と恋とか愛とか
175
+ wav/koni_vocals_08_26_04_174.wav|ファイルてか
176
+ wav/koni_vocals_08_26_04_176.wav|こんなんとか
177
+ wav/koni_vocals_08_26_04_177.wav|毎日イヤイヤ期の樺太待ってねえっと
178
+ wav/koni_vocals_08_26_04_179.wav|こんなんとかと
179
+ wav/koni_vocals_08_26_04_184.wav|会えるかな内科主な
180
+ wav/koni_vocals_08_26_04_185.wav|次世代ようなあとと嫌いなんでとまた違うかもうん
181
+ wav/koni_vocals_08_26_04_187.wav|嫌い服はあんまり着ないんだけどね
182
+ wav/koni_vocals_08_26_04_189.wav|インターぜひ引き続きあげてくで前婚に
183
+ wav/koni_vocals_08_26_04_191.wav|顔色な��か顔色って
184
+ wav/koni_vocals_08_26_04_193.wav|十年ぐらいやってはいけ落ちないっていうな地雷じゃないだろ三十五怖い
185
+ wav/koni_vocals_08_26_04_198.wav|セイバー携帯人ごとねえええ
186
+ wav/koni_vocals_08_26_04_199.wav|はいとこんな感じですね
187
+ wav/koni_vocals_08_26_04_201.wav|やっぱりここにはアパートね
188
+ wav/koni_vocals_08_26_04_202.wav|バイトは行ってないうん
189
+ wav/koni_vocals_08_26_04_203.wav|こういうやっぱメイド服は
190
+ wav/koni_vocals_08_26_04_204.wav|ここには常にみんなの面倒で痛いので
191
+ wav/koni_vocals_08_26_04_206.wav|メイド服はね
192
+ wav/koni_vocals_08_26_04_207.wav|規定たい常に公明党みんなのねーと携帯とかですね
193
+ wav/koni_vocals_08_26_04_210.wav|ポイントですよ
194
+ wav/koni_vocals_08_26_04_215.wav|併設武器は結局りますあなたの体が切られるかどうかがこれだけは心配しています足が
195
+ wav/koni_vocals_08_26_04_216.wav|いやつ切りだ
196
+ wav/koni_vocals_08_26_04_217.wav|切れてたらたりまえだよ
197
+ wav/koni_vocals_08_26_04_221.wav|三四はないです大いに土やってないけどにあってるっていうね
198
+ wav/koni_vocals_08_26_04_222.wav|頭でかい人ねって思ってても似合ってますねって言え夫
199
+ wav/koni_vocals_08_26_04_223.wav|移転中材としたことありますか夜も常にメイドですから
200
+ wav/koni_vocals_08_26_04_226.wav|心ではメイドですからね
201
+ wav/koni_vocals_08_26_04_228.wav|あのオムライスを作るんですよはい
202
+ wav/koni_vocals_08_26_04_229.wav|ここにおまおまナイスもいまいち米友達すごい作り作れますけどオムライス作って高家キャップでこういう絵を描くんでしょお前も今日みたいな下げ決まって居神社の奉納にすか
203
+ wav/koni_vocals_08_26_04_230.wav|ほんと踊り子にでくま読んじゃうん
204
+ wav/koni_vocals_08_26_04_231.wav|もうもう破れてこのかた常にメイドでメイドとして
205
+ wav/koni_vocals_08_26_04_232.wav|ちょっとまけろでもメイドけ使えるデオの誰かに
206
+ wav/koni_vocals_08_26_04_233.wav|ここにどちらかというと使う兵隊んですよ
207
+ wav/koni_vocals_08_26_04_234.wav|メールで使えるんですよね
208
+ wav/koni_vocals_08_26_04_236.wav|ここにあんなに人に支えたくはないんですけど
209
+ wav/koni_vocals_08_26_04_237.wav|どちらかというと使われたいというか
210
+ wav/koni_vocals_08_26_04_238.wav|ご主人様でありたいみたいな感じなんです
211
+ wav/koni_vocals_08_26_04_242.wav|見ていきますか次
212
+ wav/koni_vocals_08_26_04_244.wav|煮込みー教室を
213
+ wav/koni_vocals_08_26_04_245.wav|みんな休憩終わったちゃんとお茶飲んで寝よ皆もお水を言うのでね
214
+ wav/koni_vocals_08_26_04_249.wav|やっぱりその
215
+ wav/koni_vocals_08_26_04_252.wav|ありがとうございますありがとうありがとうございまずもちゃをぐちゃぐちゃ置いたお茶を
216
+ wav/koni_vocals_08_26_04_254.wav|こうやってめっちゃかわいいたけど
217
+ wav/koni_vocals_08_26_04_255.wav|エレベータ書くのはネット
218
+ wav/koni_vocals_08_26_04_258.wav|ヒッキーでも
219
+ wav/koni_vocals_08_26_04_259.wav|i保証足めっちゃ綺麗
220
+ wav/koni_vocals_08_26_04_262.wav|これは可愛いわ
221
+ wav/koni_vocals_08_26_04_263.wav|上田をいいな
222
+ wav/koni_vocals_08_26_04_268.wav|これ議題に米も
223
+ wav/koni_vocals_08_26_04_271.wav|じゃあいいよ
224
+ wav/koni_vocals_08_26_04_272.wav|こっち場合ちゃう
225
+ wav/koni_vocals_08_26_04_273.wav|めちゃくちゃ可愛いんですよこれが
226
+ wav/koni_vocals_08_26_04_275.wav|じゃあをケアを橋を焼きを議論
227
+ wav/koni_vocals_08_26_04_277.wav|家ゲー板じゃをもっと
228
+ wav/koni_vocals_08_26_04_278.wav|p返品できちゃうちゃんいいや
229
+ wav/koni_vocals_08_26_04_280.wav|かわいいよね美しいよねここにあ
230
+ wav/koni_vocals_08_26_04_282.wav|影影の部分が未来になる
231
+ wav/koni_vocals_08_26_04_283.wav|足と足の甲児たちを空にいるこの隙間から見えるものがね
232
+ wav/koni_vocals_08_26_04_285.wav|古田起北も増えたおがかわいいかないですか
233
+ wav/koni_vocals_08_26_04_286.wav|平坦がかわいいからさもう
234
+ wav/koni_vocals_08_26_04_288.wav|またライティングだろう
235
+ wav/koni_vocals_08_26_04_289.wav|腰を手で土佐驚きのかわいい感じの人みたいになっちゃうよね
236
+ wav/koni_vocals_08_26_04_290.wav|わかりなんか吠えたもう成約いた方が可愛すぎて
237
+ wav/koni_vocals_08_26_04_293.wav|何をしても加えた仏教みたいな
238
+ wav/koni_vocals_08_26_04_294.wav|小池あるよね
239
+ wav/koni_vocals_08_26_04_297.wav|歌をこうしてやったら可愛くなれるのかのう
240
+ wav/koni_vocals_08_26_04_299.wav|ここにも無理だな
241
+ wav/koni_vocals_08_26_04_301.wav|で終わりだな
242
+ wav/koni_vocals_08_26_04_303.wav|なんか決まってには今ね
243
+ wav/koni_vocals_08_26_04_304.wav|新しいやつが何個か来たのでちょっと追加しよう
244
+ wav/koni_vocals_08_26_04_306.wav|ちょっと待ってな
245
+ wav/koni_vocals_08_26_04_310.wav|はいはいはい
246
+ wav/koni_vocals_08_26_04_312.wav|こんな感じではい上ちゃんでしたありがとうめっちゃ可愛かった落ちて入りがいとこになんで買わないんですかえ何をコスプレを
247
+ wav/koni_vocals_08_26_04_315.wav|できない子にコスプレとか緊張しちゃって次
248
+ wav/koni_vocals_08_26_04_317.wav|どういうどうやっぽー
249
+ wav/koni_vocals_08_26_04_318.wav|ちょっと前で拳に消えって書いたが
250
+ wav/koni_vocals_08_26_04_320.wav|今の計画では
251
+ wav/koni_vocals_08_26_04_322.wav|どういうポスターでよく見に行った
252
+ wav/koni_vocals_08_26_04_323.wav|この左胸の所にある赤いものは何
253
+ wav/koni_vocals_08_26_04_331.wav|違うなまさかに結合なのでこうしたそんなコナンないよね
254
+ wav/koni_vocals_08_26_04_333.wav|渦など終わるわけそんな折あるわけない
255
+ wav/koni_vocals_08_26_04_334.wav|やめたみんな本当に相手
256
+ wav/koni_vocals_08_26_04_336.wav|最低何体水と水とお金か金
257
+ wav/koni_vocals_08_26_04_338.wav|一行知恵はいいいえ
258
+ wav/koni_vocals_08_26_04_339.wav|中洲美容家の前
259
+ wav/koni_vocals_08_26_04_342.wav|禁煙肉弾周囲や
260
+ wav/koni_vocals_08_26_04_345.wav|氷河期つけてんのが縫いつけたこれ
261
+ wav/koni_vocals_08_26_04_349.wav|そんな陣営の扱いかい
262
+ wav/koni_vocals_08_26_04_351.wav|やっと終えた猫二君にずっといたかったのよ現実にn
263
+ wav/koni_vocals_08_26_04_352.wav|起業者が禁煙に入ったと会いたいと思っていたの
264
+ wav/koni_vocals_08_26_04_353.wav|君に会えてとってもね
265
+ wav/koni_vocals_08_26_04_356.wav|相太後やばい
266
+ wav/koni_vocals_08_26_04_363.wav|気付いたかな
267
+ wav/koni_vocals_08_26_04_364.wav|たぶん間違えたねうん
268
+ wav/koni_vocals_08_26_04_371.wav|現品ですかね灰原晋ですねきっと間フィンのコスプレですね
269
+ wav/koni_vocals_08_26_04_374.wav|はいはいはい
270
+ wav/koni_vocals_08_26_04_376.wav|取りディスクにぱんぱんだですかね
271
+ wav/koni_vocals_08_26_04_381.wav|入れコスプレ
272
+ wav/koni_vocals_08_26_04_382.wav|えーじさんのコストですか
273
+ wav/koni_vocals_08_26_04_384.wav|彼ら現在c引くv金在徳ジャパン
274
+ wav/koni_vocals_08_26_04_386.wav|これで三人の役が気に入られれば国見になって何だこのこのブラストげてるやつなんだろう
275
+ wav/koni_vocals_08_26_04_391.wav|覚えといよう
276
+ wav/koni_vocals_08_26_04_394.wav|他に見たことなみんなわかるのかなここに天井がね
277
+ wav/koni_vocals_08_26_04_395.wav|これなんだろう可愛いなんてなんか
278
+ wav/koni_vocals_08_26_04_396.wav|なんか可愛く見えてきちゃったんだけど
279
+ wav/koni_vocals_08_26_04_399.wav|負けて何なん
280
+ wav/koni_vocals_08_26_04_400.wav|本国のコミきた
281
+ wav/koni_vocals_08_26_04_402.wav|僕の首切りたんですなんだえ
282
+ wav/koni_vocals_08_26_04_403.wav|特にこれで行ったってこと多いよ多い
283
+ wav/koni_vocals_08_26_04_404.wav|あけて日はやめミックよ
284
+ wav/koni_vocals_08_26_04_409.wav|いいと思うよかなりこれはかれない陣営の敵ファッションなとママ
285
+ wav/koni_vocals_08_26_04_410.wav|私を置いていないような味の濃いいいと思うがあ
286
+ wav/koni_vocals_08_26_04_413.wav|いつも人だったんだええフォールエイトールだったのか
287
+ wav/koni_vocals_08_26_04_416.wav|はいじゃまずいとええ
288
+ wav/koni_vocals_08_26_04_417.wav|広げみて空間にあの古い家福井ですね縁深いんですよかなり服にめちゃくちゃいいですけど
289
+ wav/koni_vocals_08_26_04_420.wav|これは捨てるの
290
+ wav/koni_vocals_08_26_04_422.wav|どっか痛い弾けるようにすごくれクール気合がってことですか
291
+ wav/koni_vocals_08_26_04_423.wav|すぐこう言っちゃいけない
292
+ wav/koni_vocals_08_26_04_424.wav|これあのブンブンブンブンブンブン読本文法っていう曲が流れるやってやん
293
+ wav/koni_vocals_08_26_04_425.wav|どういうたいね
294
+ wav/koni_vocals_08_26_04_427.wav|ちゃおちゃおちゃおていってこうやってでしょう避けるのが三
295
+ wav/koni_vocals_08_26_04_428.wav|聞くになったような場所が
296
+ wav/koni_vocals_08_26_04_431.wav|これは自分がかっこいいって酔っているのか
297
+ wav/koni_vocals_08_26_04_432.wav|自分自身よくカッコええといっているのか
298
+ wav/koni_vocals_08_26_04_433.wav|それともこの替えの結果を過去へと言っての事
299
+ wav/koni_vocals_08_26_04_434.wav|とっちゃんにつこじゃ
300
+ wav/koni_vocals_08_26_04_443.wav|n記事サイトはもいちゃん
301
+ wav/koni_vocals_08_26_04_447.wav|でるちゃんだし
302
+ wav/koni_vocals_08_26_04_449.wav|服はいいよなんかこの子したがすぼんでる
303
+ wav/koni_vocals_08_26_04_450.wav|あのパンツカッコいい当たりじゃだったーとかが吐きそうなそれに白いねスニーカーで白シャツ黒いジャケットっちごのはね
304
+ wav/koni_vocals_08_26_04_451.wav|かなりいい点だ女子ウケは高そうびっちゃ
305
+ wav/koni_vocals_08_26_04_452.wav|だからデート服としては
306
+ wav/koni_vocals_08_26_04_453.wav|めちゃくちゃていつの宅配
307
+ wav/koni_vocals_08_26_04_455.wav|これ彼氏が着てきたら
308
+ wav/koni_vocals_08_26_04_456.wav|みんな喜ぶと思う大学かっこいいけどなあと思う
309
+ wav/koni_vocals_08_26_04_457.wav|正せませんただですね
310
+ wav/koni_vocals_08_26_04_458.wav|あのクレール猫の方はいクルーなのでおいマイナス点にさせていただき美味しい
311
+ wav/koni_vocals_08_26_04_459.wav|個人等ですからハイド満点あげても良かったですけどね
312
+ wav/koni_vocals_08_26_04_462.wav|サイクリングので
313
+ wav/koni_vocals_08_26_04_464.wav|あのお米になってですね赤犬です不合格はい
314
+ wav/koni_vocals_08_26_04_467.wav|ような形でもいいしねなんかちゃんと硬いがいいような感じがしてかっこいいなというイメージ
315
+ wav/koni_vocals_08_26_04_470.wav|なんか進歩進歩でで最近の
316
+ wav/koni_vocals_08_26_04_471.wav|何軒なカッコイイを取り入れた感じを
317
+ wav/koni_vocals_08_26_04_472.wav|言い逃げですね次見てみようドベア超短期のあの資源予算の方は映画ねこっちは言っちゃってすみません
318
+ wav/koni_vocals_08_26_04_473.wav|これ良かったよね加藤映画はい
319
+ wav/koni_vocals_08_26_04_474.wav|ちょっとなんか
320
+ wav/koni_vocals_08_26_04_475.wav|なんで顔にこれが付いてるのがちょっとわかんないですけど
321
+ wav/koni_vocals_08_26_04_476.wav|ありがとあげないと数万年ちょっとバラバラになっちゃって
322
+ wav/koni_vocals_08_26_04_480.wav|そこそれを引っ張れを聞いてないよ
323
+ wav/koni_vocals_08_26_04_482.wav|ちょっと待て待て待て待て待てヨイトマケ廃墟オーラジャニーか
324
+ wav/koni_vocals_08_26_04_486.wav|教養なんじゃないですか
325
+ wav/koni_vocals_08_26_04_487.wav|長女お名前と
326
+ wav/koni_vocals_08_26_04_488.wav|タイトルですか容疑府警
327
+ wav/koni_vocals_08_26_04_489.wav|色々につき石うん
328
+ wav/koni_vocals_08_26_04_490.wav|確かにそう言われてみれば雰囲気はいいなまた来いよいっちゃん
329
+ wav/koni_vocals_08_26_04_494.wav|これが今日とか雇用時ならね
330
+ wav/koni_vocals_08_26_04_496.wav|最年長十川よく見てなさいよって感じで
331
+ wav/koni_vocals_08_26_04_502.wav|かっこいいよ
332
+ wav/koni_vocals_08_26_04_503.wav|とアップにしてもかっこいいって何
333
+ wav/koni_vocals_08_26_04_504.wav|めちゃくちゃいい
334
+ wav/koni_vocals_08_26_04_505.wav|なんかちょっと若い若い声もたみたいなのが入って良くない
335
+ wav/koni_vocals_08_26_04_506.wav|じゃあもう個人には個人にはもう大失われてしまったあの頃のエモさみたいなの感じるんだけど
336
+ wav/koni_vocals_08_26_04_507.wav|しかもジャンクちゃう
337
+ wav/koni_vocals_08_26_04_508.wav|なんて女の子
338
+ wav/koni_vocals_08_26_04_509.wav|本人の口に息を上げたとって
339
+ wav/koni_vocals_08_26_04_510.wav|空を多めでなんかそれっぽいよねすごいにごきですかあああチルノこれに置きたいの
340
+ wav/koni_vocals_08_26_04_511.wav|そして人だけじゃない
341
+ wav/koni_vocals_08_26_04_512.wav|かっこいいよ
342
+ wav/koni_vocals_08_26_04_513.wav|日本来たんだえ
343
+ wav/koni_vocals_08_26_04_518.wav|去年とわかんないんだよな金に日本の服のブランドってことだよね
344
+ wav/koni_vocals_08_26_04_519.wav|いやー難しいよ
345
+ wav/koni_vocals_08_26_04_520.wav|わかんないななんかどういう服を着たいのかによっては
346
+ wav/koni_vocals_08_26_04_521.wav|おすすめできるけど
347
+ wav/koni_vocals_08_26_04_522.wav|今人は日本の
filelists/koni_vocals_text_val_filelist.txt.cleaned ADDED
@@ -0,0 +1,347 @@
1
+ wav/koni_vocals_08_26_03_401.wav|mo↓ʃIka ʃI↑ta↓ra.
2
+ wav/koni_vocals_08_26_03_402.wav|o↑Nna↓nokono ho↓oga sU↑ki↓na no↑kana.
3
+ wav/koni_vocals_08_26_03_404.wav|de↑kiaga↓Qtoko ja↑ma ke↑Nkooni i↑te u↑mekome↓Nda su↑upuraiNzoi me↑da↓tsU fu↑kUsooo.
4
+ wav/koni_vocals_08_26_03_405.wav|me↓riQtowa kU↑ʃoo.
5
+ wav/koni_vocals_08_26_03_407.wav|a↓iga yo↓kuʃIte i↑ku ya↑gisaN↓bi ke↑ezokU ʧa↓mega to↑kUsoote ma↑Qte↓rudakeruQte ko↑to.
6
+ wav/koni_vocals_08_26_03_408.wav|ma↑ʧi↓de me↑daʧItaini na↓Qtara go↑mibu↓kurotoka ka↑bu↓Qte a↑no ze↑NʃiNsUpaNko↓oruno fU↑ku↓toka ki↑ruto i↓iyo.
7
+ wav/koni_vocals_08_26_03_409.wav|a↑tama↓ni ko↑ome↓eʃosaN mi↓taina ka↑Njide.
8
+ wav/koni_vocals_08_26_03_410.wav|go↑mibu↓kuroo ka↑bu↓Qte he↑NʃiNsUtaako↓oryaN o↑ʃiridake a↑na a↑kete da↓sUtokedo me↑ta↓meta me↑gake↓ruto o↑mo↓u.
9
+ wav/koni_vocals_08_26_03_411.wav|mu↑sumetatsujiNjo↓see ko↑no fU↑ku.
10
+ wav/koni_vocals_08_26_03_412.wav|ke↓Qkoo ko↑koniwa sU↑ki↓dakedone o↑Nna↓Nkaitokana mo↓ʃIka ʃI↑ta↓ra.
11
+ wav/koni_vocals_08_26_03_414.wav|a↑rigatoogozaima↓ita.
12
+ wav/koni_vocals_08_26_03_415.wav|hI↑kaku i↓i pe↑kiN↓kani yo↑Nda.
13
+ wav/koni_vocals_08_26_03_417.wav|ko↑rewa do↓kona n o i↑ya.
14
+ wav/koni_vocals_08_26_03_418.wav|ko↑rei↓joono fU↑tsuu ku↑ruma i↑ʧi↓dai pi↓ipurujanai.
15
+ wav/koni_vocals_08_26_03_419.wav|mo↓o na↓Nkato do↓no na↓kano ʃi↓Npuruo ki↑wame↓ta mi↓tai na↑ko↓yaN ko↑re.
16
+ wav/koni_vocals_08_26_03_421.wav|za↑koeegae↓eto.
17
+ wav/koni_vocals_08_26_03_425.wav|ha↓NniNdesUkane.
18
+ wav/koni_vocals_08_26_03_426.wav|go↑ho↓NniNdesUka ne↓ko ta↑atamamoNne.
19
+ wav/koni_vocals_08_26_03_428.wav|ka↑ku↓iidesUne u↓N.
20
+ wav/koni_vocals_08_26_03_430.wav|mi↓te o↑oeN ʃi↑ma↓sU.
21
+ wav/koni_vocals_08_26_03_431.wav|jo↑ohi↓Nni kI↑keNi↓Qpai.
22
+ wav/koni_vocals_08_26_03_437.wav|a↑a↓naN fu↑mide↓sa.
23
+ wav/koni_vocals_08_26_03_439.wav|a↓Ngaito ko↑no pa↓N.
24
+ wav/koni_vocals_08_26_03_440.wav|ʧo↓Qto do↑ki↓Qtoto ta↑ta↓ite a↓Qta ko↑to↓kototo u↑ke↓te.
25
+ wav/koni_vocals_08_26_03_441.wav|pa↓iroQtoQte.
26
+ wav/koni_vocals_08_26_03_442.wav|ya↑Qto u↑N↓haha kyo↓owa.
27
+ wav/koni_vocals_08_26_03_443.wav|na↓Nka ko↑no bo↓kuwa i↑gakuoto↓osaNQpoi.
28
+ wav/koni_vocals_08_26_03_444.wav|o↓ʧoosaNtokaga.
29
+ wav/koni_vocals_08_26_03_445.wav|kyu↑ujitsuni ki↓te i↑ru fU↑ku↓Qpoi.
30
+ wav/koni_vocals_08_26_03_446.wav|me↑ʧakUʧa ʃi↓Npoda u↓N.
31
+ wav/koni_vocals_08_26_03_447.wav|me↓Qʧa ʃi↓Npurudane.
32
+ wav/koni_vocals_08_26_03_449.wav|hI↑to↓taʧimo na↓Nka.
33
+ wav/koni_vocals_08_26_03_450.wav|ka↑Nkooto i↑u↓ka a↑sobini kI↑te↓ruQpoikedo.
34
+ wav/koni_vocals_08_26_03_451.wav|ko↑kowa do↓oyuu do↓o i↑Qta ba↑ʃona N↓daroo.
35
+ wav/koni_vocals_08_26_03_452.wav|na↓Nte i↑u to↓konaNde.
36
+ wav/koni_vocals_08_26_03_453.wav|ko↑rewa ho↓Nni ka↓i te↓ruko.
37
+ wav/koni_vocals_08_26_03_455.wav|ka↑taka↓nano ko↑diyooryo↓o.
38
+ wav/koni_vocals_08_26_03_456.wav|te↑emapa↓akuQpoiyone na↑N↓kaino.
39
+ wav/koni_vocals_08_26_03_458.wav|i↓itowa o↑jisaNpoi↓desUka o↑iʃi↓sa.
40
+ wav/koni_vocals_08_26_03_459.wav|po↓iQte i↑u↓ka.
41
+ wav/koni_vocals_08_26_03_460.wav|i↑Qtemo hi↑Q↓porujanai o↑toko↓kata ʃo↑oga↓kUseekara.
42
+ wav/koni_vocals_08_26_03_461.wav|o↑iʃi↓i ʧa↓N ma↑ri↓kireru mo↑Qto↓mo ʃi↓Npuruna fU↑ku↓mitaina ka↑Nji.
43
+ wav/koni_vocals_08_26_03_462.wav|mo↑Qto↓mo ʃi↓Npuru na↓kuQte ka↑Nji.
44
+ wav/koni_vocals_08_26_03_464.wav|ke↓Qkoo i↓ito o↑mo↓u gya↑kuni.
45
+ wav/koni_vocals_08_26_03_466.wav|da↓renimo ha↑zurenai mi↓taina.
46
+ wav/koni_vocals_08_26_03_469.wav|ka↓nari i↑iyo↓iya.
47
+ wav/koni_vocals_08_26_03_470.wav|a↑rigatoogozaNma↓ʃIta hi↑QtogozeNʃiai↓gai to↓kuni ri↑kai↓do ti↓iN.
48
+ wav/koni_vocals_08_26_03_471.wav|ko↑rede bo↓ku yo↑oeda↓jiNga ri↑ra↓kudewa e↑etai ko↑no ki↓setsude ʧo↓Qto ma↓te.
49
+ wav/koni_vocals_08_26_03_472.wav|ko↑no ko↑ozoku↓ke mo↓jiya me↓roN.
50
+ wav/koni_vocals_08_26_03_473.wav|mi↑Nna ho↑Ntooni ko↑no e↑mojiga sU↑ki↓dana ko↑no ha↓o da↓ʃIte wa↑raQte i↑ruo.
51
+ wav/koni_vocals_08_26_03_474.wav|ko↑no e↑mojitsUkiya↓ra.
52
+ wav/koni_vocals_08_26_03_476.wav|ko↑rewa na↑maega na↓i no↑kana.
53
+ wav/koni_vocals_08_26_03_477.wav|na↑maega na↓iQpoi ta↓itoruto na↑maega ko↑re.
54
+ wav/koni_vocals_08_26_03_479.wav|ko↑re na↑N↓po a↑ʃi hi↑ra↓i ko↑re↓uN.
55
+ wav/koni_vocals_08_26_03_480.wav|ki↓N ro↑itaii↓tekina i↑isugi↓tayo.
56
+ wav/koni_vocals_08_26_03_481.wav|ko↑re↓buNnomide na↓Nto.
57
+ wav/koni_vocals_08_26_03_482.wav|a↑ya↓hadeQte me↓gane ka↑ke↓teta me↓gane mo↑to↓dekaketetaQke.
58
+ wav/koni_vocals_08_26_03_483.wav|mi↑gikIkini ne↑Ngu↓sorewa so↑Nna go↓janai ko↑koniwa ni↑ji↓geNno te↑itakIkite↓ru hI↑toga i↓inaato o↑moima↓sUyo o↑itaʧi do↓NdoN hI↑ke↓go i↓ijanaidesUka.
59
+ wav/koni_vocals_08_26_03_488.wav|mo↓ʃI tsu↑kia↓Qte i↑ru hI↑toga i↑te.
60
+ wav/koni_vocals_08_26_03_489.wav|so↑no hI↑toga zu↑Qto ko↓u.
61
+ wav/koni_vocals_08_26_03_490.wav|ni↑ji↓geNno hI↑toba↓kario ki↓te i↑ta↓ra.
62
+ wav/koni_vocals_08_26_03_491.wav|mi↑na↓dokokara ta↓iʃIta de↓koniwa so↑no hI↑tono ko↑to↓o bu↑Qtoba↓sUkamo ʃi↑renai↓kedo.
63
+ wav/koni_vocals_08_26_03_492.wav|ma↓a ge↑emu↓nekonino.
64
+ wav/koni_vocals_08_26_03_493.wav|ku↑nino ho↓oo a↑no ho↑me↓te ku↑reru N↓daQtara ko↑no.
65
+ wav/koni_vocals_08_26_03_494.wav|ke↑ekakuo ki↓te ku↑reru N↓dakara ma↓a i↓ikedone i↑Qso i↑Qʃo↓konidakeno i↑ta ti↓i o ki↓te ku↑reru N↑data↓ikedo.
66
+ wav/koni_vocals_08_26_03_495.wav|ku↑ra↓gii fU↑to↓hiite su↑be↓ki a↑beto o↑kurema↓sUto ju↓uʃii fa↑N↓kaNni.
67
+ wav/koni_vocals_08_26_03_498.wav|ho↑merare↓nainoni mu↑riyari ho↑mete↓ru n o i↓i.
68
+ wav/koni_vocals_08_26_03_500.wav|ya↑Qteki↓te ha↑ima↓sUQte i↑u ko↑to na↓iwa ko↓ini ki↑moʧi wa↑ru↓iQte i↑Qte ku↑dasa↓i yo↓kUte na↑o↓etsuni k i.
69
+ wav/koni_vocals_08_26_03_502.wav|ki↑o↓Qte a↑ru↓kuwanaidaroo.
70
+ wav/koni_vocals_08_26_03_504.wav|e↓fU su↑upu↓ʃiwa i↑gaito ya↓Qte ma↓Qte ma↓Qte ma↓Qte ma↓Qte.
71
+ wav/koni_vocals_08_26_03_505.wav|da↓Qte ko↑Nnana N↓kasa.
72
+ wav/koni_vocals_08_26_03_506.wav|ko↓Nya mu↑ʃi↓keNo mi↑te↓ru mi↓taini i↑roNna so↑oiQta ko↑to na↓iwa mu↓riyao mi↑te↓naiwa ko↓ini.
73
+ wav/koni_vocals_08_26_03_507.wav|ʧa↑Nto so↑no so↑no yo↑ofUkuno na↓kano yo↓i to↑koro ho↑me↓temasUkara.
74
+ wav/koni_vocals_08_26_03_508.wav|na↓Nka ko↑no i↑e↓giraitoka so↑oja↓na i↑kareta ku↑ni.
75
+ wav/koni_vocals_08_26_04_0.wav|ko↑no te↑enaNkimosugitoka o↑mowa↓naikara.
76
+ wav/koni_vocals_08_26_04_1.wav|ko↑rewa ri↓aruriaru ri↓aruna ha↑Nnoode↓sU.
77
+ wav/koni_vocals_08_26_04_7.wav|ko↑noQ ku↓ito na↓ikedone.
78
+ wav/koni_vocals_08_26_04_8.wav|i↓Qkinini ke↑Ntoo.
79
+ wav/koni_vocals_08_26_04_9.wav|se↑ekakuni ku↑nino ni↑ji↓kaNo yu↑ru↓su ga↓uNni ri↑keNo yu↑rusu↓teki mi↑Nna ni↑jigeN↓guNo ya↑me.
80
+ wav/koni_vocals_08_26_04_10.wav|to↑kini↓jiʧaNwa yu↑ru↓seba.
81
+ wav/koni_vocals_08_26_04_14.wav|i↓ito o↑mo↓ukedona.
82
+ wav/koni_vocals_08_26_04_17.wav|ko↑no ʧo↑odo↓ne.
83
+ wav/koni_vocals_08_26_04_18.wav|ʧo↑odo ʃi↑NpiNno to↑ko↓raheNga.
84
+ wav/koni_vocals_08_26_04_19.wav|ki↑raNto hI↑ka↓Qte i↑ru no↑mo i↓ito o↑moima↓sU.
85
+ wav/koni_vocals_08_26_04_20.wav|ko↑kono ku↑nino ta↑me↓nito po↑iNtowa ko↑oyuu ʧo↓Qto.
86
+ wav/koni_vocals_08_26_04_21.wav|ʧo↓Qto a↑yaui ba↑ʃoni ʧI↑kazukuni.
87
+ wav/koni_vocals_08_26_04_22.wav|tsu↑rete ko↑o ki↑ra↓kirato hI↑ka↓Qte i↑ru ko↑ya mo↓o su↑baraʃi↓i e↓i o yo↓ku i↓to o↑moima↓sUyo ko↑N na↓o yo↑ofUkuo.
88
+ wav/koni_vocals_08_26_04_24.wav|ko↑Nnao yo↓kU ki↑ita↓idesUkane ko↑no ki↓rakiratodesU.
89
+ wav/koni_vocals_08_26_04_25.wav|ko↑kaNga i↑kIka↓Qte i↑ru.
90
+ wav/koni_vocals_08_26_04_26.wav|ni↑Qpo↓Nno o↑NnamiNna↓soona N↓desUka.
91
+ wav/koni_vocals_08_26_04_28.wav|ni↑Qpo↓Nno o↑Nnanokomi↓Nna so↑oQte i↑u no↑wa.
92
+ wav/koni_vocals_08_26_04_29.wav|do↓oyuu ko↑to↓o ni↑Qpo↓Nno o↑Nna↓noko me↑ʧakUʧa wa↑ru↓guʧi i↑uto o↑mo↓uyo.
93
+ wav/koni_vocals_08_26_04_30.wav|a↑itsuno ta↑rai i↑ka tsU↑ki mu↓ke na↓i yu↑meki↓buNyoneQte i↑Qte ze↑NzeN i↑yoto o↑mo↓u da↑ijo↓obu.
94
+ wav/koni_vocals_08_26_04_31.wav|ze↑NzeN wa↑ru↓guʧi i↑ru↓kara a↑NʃiN ʃI↑te so↑Nnani mi↑Nna ʧa↑Nto o↑warukIʧaQta↓yokara a↑NʃiN ʃI↑te da↑ijo↓obu.
95
+ wav/koni_vocals_08_26_04_32.wav|tsu↑gi i↑kimaʃo↓o.
96
+ wav/koni_vocals_08_26_04_34.wav|o↓ʃioga a↑idokuN u↓N.
97
+ wav/koni_vocals_08_26_04_35.wav|na↑iriku↓ekomitaina ku↑ukaNyau↓Nnaino.
98
+ wav/koni_vocals_08_26_04_36.wav|ko↓o i↑Qta a↑idoku.
99
+ wav/koni_vocals_08_26_04_42.wav|ko↑remo i↑to↓ko so↑Nna ko↑to na↓iyone o↑to↓dato i↑Qte.
100
+ wav/koni_vocals_08_26_04_43.wav|ko↑re↓korewa ʧa↑Nto jo↑seedato i↑Qte o↑negaionegaio↓negai.
101
+ wav/koni_vocals_08_26_04_44.wav|ko↑rewa a↑dofi↓idoyoto i↑Qte ku↑reyamete.
102
+ wav/koni_vocals_08_26_04_45.wav|mo↓o ya↑me↓ta bo↓okokude ko↑remo i↑to↓kodesUyotoka yu↓daya na↓kya o↑ni↓daka so↑okonini ko↑rewa ʧa↑Nto ʃI↑ta o↑Nna↓nokoto yo↑oto↓dareka i↑Qte ku↑retanomu.
103
+ wav/koni_vocals_08_26_04_46.wav|ko↑Nnani ta↓iraya hi↑ra↓neta ki↑re↓ta.
104
+ wav/koni_vocals_08_26_04_48.wav|a↑ʃi o↑ʃii↓Qte ki↓reijanai.
105
+ wav/koni_vocals_08_26_04_49.wav|o↑ʃi↓i fu↑ra↓uto ʃi↓tete ki↓reina ki↑ga su↑ru N↓dakedo.
106
+ wav/koni_vocals_08_26_04_51.wav|hya↑ku↓paato ko↓ega.
107
+ wav/koni_vocals_08_26_04_53.wav|ko↑re ta↑be↓te i↑naito ho↑Ntooni.
108
+ wav/koni_vocals_08_26_04_54.wav|ha↑ide↓tekI ʧo↓Qto.
109
+ wav/koni_vocals_08_26_04_55.wav|ko↑koni↓iʧaNo ʧo↓Qto.
110
+ wav/koni_vocals_08_26_04_56.wav|gi↑jutsuryo↓kumo i↓itokokIte i↑nai.
111
+ wav/koni_vocals_08_26_04_58.wav|ko↑rejooda↓Nno se↑rifuno.
112
+ wav/koni_vocals_08_26_04_60.wav|ka↑wai↓i N↓dakedo.
113
+ wav/koni_vocals_08_26_04_62.wav|na↑Nkase↓efUkuQpoikedo.
114
+ wav/koni_vocals_08_26_04_63.wav|ʧu↓ugokuno se↑efUkuo.
115
+ wav/koni_vocals_08_26_04_64.wav|ʧu↓ugokuo ʧu↓ugokuno ga↑Qkoono se↑efUkuQpoi ka↑Nji.
116
+ wav/koni_vocals_08_26_04_65.wav|so↑oiQta mo↓o ni↑Qpo↓Nno a↓nimeka na↓Nkano se↑efUkukana a↑Nmari ki↓N ni↑Qpo↓Ndato fU↑tsuude↓wa na↓i se↓efU ko↑no ka↑Njidayone.
117
+ wav/koni_vocals_08_26_04_66.wav|me↓Qʧa ka↑wai↓kunai.
118
+ wav/koni_vocals_08_26_04_67.wav|a↑idi↓iga ne↑raina↓node ko↑re.
119
+ wav/koni_vocals_08_26_04_68.wav|a↑idi↓i na↓i N↓desUyo.
120
+ wav/koni_vocals_08_26_04_69.wav|ko↑no ma↓ega na↓kumi.
121
+ wav/koni_vocals_08_26_04_71.wav|ʃa↑kugaNno ʃa↓nano ko↑to↓jaa so↑ona N↓da.
122
+ wav/koni_vocals_08_26_04_72.wav|ya↑Qpa ko↑oʃIte na↓Qta.
123
+ wav/koni_vocals_08_26_04_73.wav|ko↑oʃIteQpoikawai.
124
+ wav/koni_vocals_08_26_04_75.wav|ko↑rewa ka↑wai↓ine ʧu↓ugokunimo na↓i ni↑Qpo↓Nnimo to↑ʃi↓no ni↑Qpo↓Ndemo a↑Nmari ko↑oyuu se↓ko.
125
+ wav/koni_vocals_08_26_04_76.wav|n o i↑kigakeriyoona↓Nka.
126
+ wav/koni_vocals_08_26_04_77.wav|so↓mosomo ni↑Qpo↓NQte mo↓Qto ko↓o na↑Nkaʃi↓Npuruna i↑ro↓na ki↑ga su↑ru↓kara.
127
+ wav/koni_vocals_08_26_04_78.wav|i↓ine ko↑Nna mi↑doriiro ni↓saN go↓orudono ri↓boNdeʃoo ko↑N na↑keguQta↓ra me↓Qʧa ka↑wai↓kunai.
128
+ wav/koni_vocals_08_26_04_79.wav|ko↑koni ko↑Nna se↑efUku a↓Qtara ko↑Nna ko↑to ko↓ro i↑kIta↓kaQtaga.
129
+ wav/koni_vocals_08_26_04_80.wav|ga↑Qkoogawa i↑ʃano ʃi↑ranai↓no.
130
+ wav/koni_vocals_08_26_04_83.wav|ka↑iʃano ku↑rumaga ʃi↑ranai.
131
+ wav/koni_vocals_08_26_04_84.wav|i↑kioizu↓iyo ka↑wai↓i ne↓kode a↑ri↓gatoo.
132
+ wav/koni_vocals_08_26_04_88.wav|o↑Nna↓to ʃI↑te o↑Nna↓to a↑Qte↓ru to↑modaʧItoka i↑nai ka↓re.
133
+ wav/koni_vocals_08_26_04_89.wav|ʧo↓Qto a↓Qte mi↓taina se↓ede mi↓te mi↑ta↓i jo↑soo ʃI↑te i↑ru hI↑toni.
134
+ wav/koni_vocals_08_26_04_91.wav|ya↑ha↓ri r u to↑koro↓ga o↓osugite to↓rotoro ʃI↑ta a↓tode o↑waQta↓ra ku↓ri hI↑to↓ride pe↓ropero.
135
+ wav/koni_vocals_08_26_04_93.wav|ko↑rede to↑ria↓ezu i↑Qko↓meN o↑warida.
136
+ wav/koni_vocals_08_26_04_94.wav|da↑inida↓Nkaini da↓i ni↑ko↓meni i↑ku ma↓eni ʧo↓Qto mi↑Nna↓ni ni↑ko ni↓no.
137
+ wav/koni_vocals_08_26_04_95.wav|ko↓jiNno ya↓Qtemo mi↓temo yo↓ruo i↑Qʃo.
138
+ wav/koni_vocals_08_26_04_96.wav|de↓mote ko↓rarete.
139
+ wav/koni_vocals_08_26_04_100.wav|bo↑iNbo↓iNdesUne.
140
+ wav/koni_vocals_08_26_04_101.wav|ka↑wai↓idesUyo ko↑rewa.
141
+ wav/koni_vocals_08_26_04_103.wav|u↑waki↓kyoni na↓Qtato i↑u ko↑to↓desUne.
142
+ wav/koni_vocals_08_26_04_105.wav|ka↑wai↓iyone.
143
+ wav/koni_vocals_08_26_04_106.wav|fu↓utaopoiyone ho↑ta o↑boe kyo↓Nʃiikana.
144
+ wav/koni_vocals_08_26_04_107.wav|kyo↓obinipoiyone ka↑wai↓idesU ko↑rewa.
145
+ wav/koni_vocals_08_26_04_109.wav|e i↑kU ku↑ni↓hai.
146
+ wav/koni_vocals_08_26_04_110.wav|ko↑konide↓wa na↓i tsu↑ujoo↓nekoni.
147
+ wav/koni_vocals_08_26_04_112.wav|be↑tsuni mi↑Nna↓saNga wa↓kedewa na↓i ko↑niwa.
148
+ wav/koni_vocals_08_26_04_114.wav|ta↓da so↑oyuu.
149
+ wav/koni_vocals_08_26_04_115.wav|so↑oyuu wa↓ke o↑kariʃIta N↓dato sa↑iyoo.
150
+ wav/koni_vocals_08_26_04_116.wav|ko↓ogino su↑ri↓i di↓i d a mi↓tai.
151
+ wav/koni_vocals_08_26_04_117.wav|so↑Nna mo↑no na↓i.
152
+ wav/koni_vocals_08_26_04_118.wav|ko↑koni no↑se ri↓i ri↓inaNte mo↑no↓wa na↓i.
153
+ wav/koni_vocals_08_26_04_119.wav|ma↓Qte ja↑ko↓meʃiga.
154
+ wav/koni_vocals_08_26_04_122.wav|ki↓te so↑ona ya↓tsuo mi↑se↓te ya↑roo.
155
+ wav/koni_vocals_08_26_04_123.wav|ʧo↓Qto ma↓Qte o↑Nna↓nokoQte ʃi↑rabe↓ruto ma↓jide se↑efUkuno i↑ʧi↓datoka i↑Qte ku↓ru.
156
+ wav/koni_vocals_08_26_04_124.wav|ke↑Qkyoku tsU↑ki↓baQka de↑te ko↑reda↓kedo.
157
+ wav/koni_vocals_08_26_04_130.wav|e↓eto nya↓N ma↓Qtene.
158
+ wav/koni_vocals_08_26_04_131.wav|ʧo↓Qto ma↓Qte i↓ma sa↑gaʃIte↓rukara.
159
+ wav/koni_vocals_08_26_04_132.wav|me↓Qʧa kU↑ʧa ni↑QpoN↓jiNQpoito o↑mo↓ideʃoo.
160
+ wav/koni_vocals_08_26_04_133.wav|mi↑QtsuniQpoN↓jiNpoidesUne e↓ki o↑wari↓soona kya↑kUseki↓yukani n e.
161
+ wav/koni_vocals_08_26_04_146.wav|sa↑Nji↓geNde ka↑rite↓ru i↑e↓ya ta↑wai.
162
+ wav/koni_vocals_08_26_04_147.wav|da↑itaimo ko↑Nna ka↑Njino fU↑ku ki↓temasUne.
163
+ wav/koni_vocals_08_26_04_149.wav|wa↑taʃiwa ko↑no yo↓oni.
164
+ wav/koni_vocals_08_26_04_150.wav|ni↑QpoN↓jiN po↓N ya↓neni i↑wareruto o↑mo↓Qta ni↑QpoN↓jiNpoiQte o↑mowareteta↓roonato o↑mo↓Qta.
165
+ wav/koni_vocals_08_26_04_153.wav|ʃi↑NdeNna ke↑Nʃooga k i.
166
+ wav/koni_vocals_08_26_04_157.wav|ko↑oyuu no↑gooʃare↓naiyone.
167
+ wav/koni_vocals_08_26_04_158.wav|fU↑ʃigina ko↑to↓ni.
168
+ wav/koni_vocals_08_26_04_163.wav|te↑NneNmu↓sumeni ji↑buNno sa↑Nji↓geNno.
169
+ wav/koni_vocals_08_26_04_164.wav|ha↑koo mi↑se↓ronaNte o↑moQte↓nakaQtakara.
170
+ wav/koni_vocals_08_26_04_165.wav|te↑NneNyo↓oi ʃI↑tenai↓kedona.
171
+ wav/koni_vocals_08_26_04_169.wav|ke↓Qkoo a↑Nmari↓ne.
172
+ wav/koni_vocals_08_26_04_170.wav|i↑e↓towa ki↑maQtenai N↓dakedo.
173
+ wav/koni_vocals_08_26_04_171.wav|ko↑rede kyo↓o pa↑ki↓Q tsu↑gi↓desU.
174
+ wav/koni_vocals_08_26_04_172.wav|t o ko↓itoka a↓itoka.
175
+ wav/koni_vocals_08_26_04_174.wav|fa↓iru te↓ka.
176
+ wav/koni_vocals_08_26_04_176.wav|ko↓NnaNtoka.
177
+ wav/koni_vocals_08_26_04_177.wav|ma↑iniʧiiyaiya↓kino ka↑rafUto ma↓Qtene e↑Qto.
178
+ wav/koni_vocals_08_26_04_179.wav|ko↑N na↓Ntokato.
179
+ wav/koni_vocals_08_26_04_184.wav|a↑e↓rukana na↑ika o↓mona.
180
+ wav/koni_vocals_08_26_04_185.wav|ji↑se↓daiyoona a↓toto ki↑rainaNdeto ma↑ta ʧi↑gaukamo u↓N.
181
+ wav/koni_vocals_08_26_04_187.wav|ki↑rai↓fUkuwa a↑Nmari ki↓nai N↓dakedone.
182
+ wav/koni_vocals_08_26_04_189.wav|i↓Ntaa ze↓hi hI↑kitsuzukiagete ku↑de ze↑N↓koNni.
183
+ wav/koni_vocals_08_26_04_191.wav|ka↑oirona↓Nka ka↑oiroQte.
184
+ wav/koni_vocals_08_26_04_193.wav|ju↑uneNgu↓rai ya↓Qtewa i↑keoʧi↓naiQte i↑u↓na ji↑raijanaidaro sa↓Njuu g o ko↑wa↓i.
185
+ wav/koni_vocals_08_26_04_198.wav|se↑ibaakeetaijiNgotone↓e e↓e.
186
+ wav/koni_vocals_08_26_04_199.wav|ha↑itoko↓Nna ka↑Njide↓sUne.
187
+ wav/koni_vocals_08_26_04_201.wav|ya↑Qpa↓ri ko↑koniwa a↑pa↓atone.
188
+ wav/koni_vocals_08_26_04_202.wav|ba↑itowa i↑Qtenai u↓N.
189
+ wav/koni_vocals_08_26_04_203.wav|ko↑oyuu ya↑Qpa me↑ido↓fUkuwa.
190
+ wav/koni_vocals_08_26_04_204.wav|ko↑koniwa tsu↓neni mi↑Nna↓no me↑Ndo↓ode i↑ta↓inode.
191
+ wav/koni_vocals_08_26_04_206.wav|me↑ido↓fUkuwane.
192
+ wav/koni_vocals_08_26_04_207.wav|kI↑teeta i↑jooni ko↑omeetoomi↓Nnanoneeto ke↑etaitokadesUne.
193
+ wav/koni_vocals_08_26_04_210.wav|po↑iNtode↓sUyo.
194
+ wav/koni_vocals_08_26_04_215.wav|he↑esetsu↓bukiwa ke↑Qkyokurima↓sU a↑na↓tano ka↑radaga ki↓rareruka do↓okaga ko↑redakewa ʃi↑Npai ʃI↑te i↑ma↓sU a↑ʃi↓ga.
195
+ wav/koni_vocals_08_26_04_216.wav|i↑ya ts u ki↑ri↓da.
196
+ wav/koni_vocals_08_26_04_217.wav|ki↑retetarata↓ri ma↑edayo.
197
+ wav/koni_vocals_08_26_04_221.wav|sa↓Njuu yo↓NwanaidesU o↓oini tsU↑ʧi ya↑Qte↓naikedo ni↑aQte↓ruQte i↑une.
198
+ wav/koni_vocals_08_26_04_222.wav|a↑tama↓de ka↑ijiNneQte o↑mo↓Qtetemo ni↑a↓QtemasUneQte i↑e o↑Qto.
199
+ wav/koni_vocals_08_26_04_223.wav|i↑teNʧuuzaito ʃI↑ta ko↑to a↑rima↓sUka yo↓rumo tsu↓neni me↓idodesUkara.
200
+ wav/koni_vocals_08_26_04_226.wav|ko↑ko↓rodewa me↓idodesUkarane.
201
+ wav/koni_vocals_08_26_04_228.wav|a↑no o↑mura↓isuo tsU↑ku↓ru N↓desUyo ha↓i.
202
+ wav/koni_vocals_08_26_04_229.wav|ko↑koni o↑maomana↓isumo i↑ma↓iʧi be↑eto↓modaʧI su↑go↓i tsU↑kuri tsU↑kurema↓sUkedo o↑mura↓isU tsu↑ku↓Qte ko↑okakya↓Qpude ko↑oyuu e↓o e↑ga↓ku N↓deʃo o↑maemo kyo↓o mi↓taina sa↑ge ki↑maQte i ji↓Njano ho↑onooni sU↑ka.
203
+ wav/koni_vocals_08_26_04_230.wav|ho↑Ntoo↓dorikonide ku↓ma yo↓Nja u↓N.
204
+ wav/koni_vocals_08_26_04_231.wav|mo↓omoo ya↑bure↓te ko↑no ka↑ta tsu↓neni me↓idode me↓idoto ʃI↑te.
205
+ wav/koni_vocals_08_26_04_232.wav|ʧo↓Qto ma↑kerodemo me↑ido↓ke tsU↑kae↓rude o↓no da↓rekani.
206
+ wav/koni_vocals_08_26_04_233.wav|ko↑koni do↓ʧirakato i↑uto tsU↑kau he↑etaiNde↓sUyo.
207
+ wav/koni_vocals_08_26_04_234.wav|me↑erude tsU↑kae↓ru N↓desUyone.
208
+ wav/koni_vocals_08_26_04_236.wav|ko↑koni a↑Nnani hI↑toni sa↑saeta↓kuwa na↓i N↓desUkedo.
209
+ wav/koni_vocals_08_26_04_237.wav|do↓ʧirakato i↑uto tsU↑kawareta↓ito i↑u↓ka.
210
+ wav/koni_vocals_08_26_04_238.wav|go↑ʃujiNsamadearita↓i mi↓taina ka↑Njina N↓desU.
211
+ wav/koni_vocals_08_26_04_242.wav|mi↓te i↑kima↓sUka tsu↑gi.
212
+ wav/koni_vocals_08_26_04_244.wav|ni↑komiikyo↓oʃItsuo.
213
+ wav/koni_vocals_08_26_04_245.wav|mi↑Nnakyu↓ukee o↑waQta ʧa↑Nto o↑ʧa no↓Nde ne↑yo mi↑namo o↓mizuo i↑u↓nodene.
214
+ wav/koni_vocals_08_26_04_249.wav|ya↑Qpa↓ri so↑no.
215
+ wav/koni_vocals_08_26_04_252.wav|a↑ri↓gatoo go↑zaimasUarigatooarigatoogoza↓i ma↓zu mo↑ʧao gu↓ʧaguʧa o↑ita o↑ʧao.
216
+ wav/koni_vocals_08_26_04_254.wav|ko↓o ya↓Qte me↓Qʧa ka↑waiita↓kedo.
217
+ wav/koni_vocals_08_26_04_255.wav|e↑rebe↓eta ka↓ku no↑wa ne↑Qto.
218
+ wav/koni_vocals_08_26_04_258.wav|hi↓Qkiidemo.
219
+ wav/koni_vocals_08_26_04_259.wav|a↓i ho↑ʃoo↓aʃi me↓Qʧa ki↓ree.
220
+ wav/koni_vocals_08_26_04_262.wav|ko↑rewa ka↑wai↓iwa.
221
+ wav/koni_vocals_08_26_04_263.wav|u↑edao i↓ina.
222
+ wav/koni_vocals_08_26_04_268.wav|ko↑regi↓daini ko↑me↓mo.
223
+ wav/koni_vocals_08_26_04_271.wav|ja↓a i↓iyo.
224
+ wav/koni_vocals_08_26_04_272.wav|ko↑Qʧi ba↑ai ʧa↓u.
225
+ wav/koni_vocals_08_26_04_273.wav|me↑ʧakUʧa ka↑wai↓i N↓desUyo ko↑rega.
226
+ wav/koni_vocals_08_26_04_275.wav|ja↓ao ke↓ao ha↑ʃi↓o ya↑kio gi↓roN.
227
+ wav/koni_vocals_08_26_04_277.wav|i↑egee↓itajao mo↓Qto.
228
+ wav/koni_vocals_08_26_04_278.wav|pi↓i he↑NpiN de↑kIʧau↓ʧaN i↓iya.
229
+ wav/koni_vocals_08_26_04_280.wav|ka���wai↓iyone u↑tsUkuʃi↓iyone ko↑koni a.
230
+ wav/koni_vocals_08_26_04_282.wav|ka↑ge↓kageno bu↓buNga mi↓raini na↓ru.
231
+ wav/koni_vocals_08_26_04_283.wav|a↑ʃi↓to a↑ʃi↓no ka↑butoji↓taʧio so↓rani i↑ru ko↑no sU↑kimakara mi↑e↓ru mo↑no↓gane.
232
+ wav/koni_vocals_08_26_04_285.wav|fu↓ruta o↑kori↓kItamo fu↑e↓ta o↓ga ka↑waiikanaidesU↓ka.
233
+ wav/koni_vocals_08_26_04_286.wav|he↑etaNga ka↑wai↓ikara sa↓mo u.
234
+ wav/koni_vocals_08_26_04_288.wav|ma↑ta ra↑itiNgudaroo.
235
+ wav/koni_vocals_08_26_04_289.wav|ko↑ʃio te↓de to↑sao↓dorokino ka↑wai↓i ka↑Njino hI↑tomi↓taini na↑Qʧau↓yone.
236
+ wav/koni_vocals_08_26_04_290.wav|wa↑ka↓rinaNka ho↑e↓ta mo↓o se↑eyaku i↑ta ho↓oga ka↑wai↓sugite.
237
+ wav/koni_vocals_08_26_04_293.wav|na↓nio ʃI↑temo ku↑wae↓ta bu↓Qkyoomitaina.
238
+ wav/koni_vocals_08_26_04_294.wav|ko↑ike a↓ruyone.
239
+ wav/koni_vocals_08_26_04_297.wav|u↑ta↓o ko↑oʃIte ya↓Qtara ka↑wai↓kunareru no↑kanoo.
240
+ wav/koni_vocals_08_26_04_299.wav|ko↑konimo mu↓ridana.
241
+ wav/koni_vocals_08_26_04_301.wav|d e o↑warida↓na.
242
+ wav/koni_vocals_08_26_04_303.wav|na↓Nka ki↑maQteniwa i↓mane.
243
+ wav/koni_vocals_08_26_04_304.wav|a↑taraʃi↓i ya↓tsuga na↓Nkoka ki↓tanode ʧo↓Qto tsu↑ika ʃi↑yoo.
244
+ wav/koni_vocals_08_26_04_306.wav|ʧo↓Qto ma↓Qtena.
245
+ wav/koni_vocals_08_26_04_310.wav|ha↑ihai↓hai.
246
+ wav/koni_vocals_08_26_04_312.wav|ko↑Nna ka↑Njide ha↑iueʧaNdeʃIta a↑ri↓gatoo me↓Qʧa ka↑wai↓kaQta o↑ʧIte↓iriga i↑to↓koni na↓Nde ka↑wanai N↓desUka e↓nanio ko↑sUpureo.
247
+ wav/koni_vocals_08_26_04_315.wav|de↑ki↓nai ko↑ni ko↑sUpuretoka ki↑Nʧoo ʃI↑ʧaQte tsu↑gi.
248
+ wav/koni_vocals_08_26_04_317.wav|do↓oyuu do↓o ya↓Q po↓o.
249
+ wav/koni_vocals_08_26_04_318.wav|ʧo↓Qto ma↓ede ko↑buʃini ki↑eQte ka↓itaga.
250
+ wav/koni_vocals_08_26_04_320.wav|i↓mano ke↑ekakudewa.
251
+ wav/koni_vocals_08_26_04_322.wav|do↓oyuu po↓sUtaade yo↑kumini i↑Qta.
252
+ wav/koni_vocals_08_26_04_323.wav|ko↑no hi↑dari↓muneno to↑koroni a↓ru a↑kai mo↑no↓wa na↓ni.
253
+ wav/koni_vocals_08_26_04_331.wav|ʧi↑gau↓na ma↓sakani ke↑tsugoona↓node ko↑oʃIta so↑Nna ko↓naN na↓iyone.
254
+ wav/koni_vocals_08_26_04_333.wav|u↓zunado o↑waru wa↓ke so↑Nna o↑ri a↑ruwake↓nai.
255
+ wav/koni_vocals_08_26_04_334.wav|ya↑me↓ta mi↑Nna ho↑Ntooni a↑ite.
256
+ wav/koni_vocals_08_26_04_336.wav|sa↑itee na↑Ntai↓suito mi↑zuto o↑kaneka ki↓N.
257
+ wav/koni_vocals_08_26_04_338.wav|i↑Qkooʧiehaii↓ie.
258
+ wav/koni_vocals_08_26_04_339.wav|na↑kasu bi↑yoo↓kano ma↓e.
259
+ wav/koni_vocals_08_26_04_342.wav|ki↑NeNnikudaNʃu↓uiya.
260
+ wav/koni_vocals_08_26_04_345.wav|hyo↑oga↓kI tsu↑ke↓teN no↑ga nu↑itsUke↓ta ko↑re.
261
+ wav/koni_vocals_08_26_04_349.wav|so↑Nna ji↑Neeno ko↑i↓kai.
262
+ wav/koni_vocals_08_26_04_351.wav|ya↑Qto o↑eta ne↑ko↓nikuNni zu↑Qto i↑ta↓kaQtanoyo ge↑Njitsuni e↓nu.
263
+ wav/koni_vocals_08_26_04_352.wav|ki↑gyooʃaga ki↑NeNni ha↓iQtato a↑ita↓ito o↑mo↓Qte i↑ta↓no.
264
+ wav/koni_vocals_08_26_04_353.wav|ki↑mini a↑e↓te to↑Qtemone.
265
+ wav/koni_vocals_08_26_04_356.wav|so↑ota↓go ya↑ba↓i.
266
+ wav/koni_vocals_08_26_04_363.wav|ki↑zu↓itakana.
267
+ wav/koni_vocals_08_26_04_364.wav|ta↓buN ma↑ʧigae↓tane u↓N.
268
+ wav/koni_vocals_08_26_04_371.wav|ge↓NpiNdesUkane ha↑i↓hara su↑sumude↓sUne ki↑Qto a↑ida↓fiNno ko↑sUpurede↓sUne.
269
+ wav/koni_vocals_08_26_04_374.wav|ha↑ihai↓hai.
270
+ wav/koni_vocals_08_26_04_376.wav|to↑ri di↓sUkuni pa↑NpaNdadesU↓kane.
271
+ wav/koni_vocals_08_26_04_381.wav|i↑re ko↑sUpure.
272
+ wav/koni_vocals_08_26_04_382.wav|e↑eji↓saNno ko↓sUtodesUka.
273
+ wav/koni_vocals_08_26_04_384.wav|ka↓rera ge↓Nzai ʃi↓i hI↑ku bu↓i ki↑Nzaitokujapa↓N.
274
+ wav/koni_vocals_08_26_04_386.wav|ko↑rede sa↑N↓niNno ya↑ku↓ga ki↑nii↓rarereba ku↑nimini na↓Qte na↓nida ko↑no ko↑no bu↑rasUtoge te↓ru ya↓tsuna N↓daroo.
275
+ wav/koni_vocals_08_26_04_391.wav|o↑boe↓toiyoo.
276
+ wav/koni_vocals_08_26_04_394.wav|ta↓ni mi↓ta ko↑to↓na mi↑Nna wa↑ka↓ru no↑kana ko↑koni te↑Njoogane.
277
+ wav/koni_vocals_08_26_04_395.wav|ko↑rena N↓daroo ka↑wai↓inaNte na↓Nka.
278
+ wav/koni_vocals_08_26_04_396.wav|na↓Nkakawaikumiete ki↓ʧaQta N↓dakedo.
279
+ wav/koni_vocals_08_26_04_399.wav|ma↑kete na↑ni↓naN.
280
+ wav/koni_vocals_08_26_04_400.wav|ho↓Ngokuno ko↑mi ki↓ta.
281
+ wav/koni_vocals_08_26_04_402.wav|bo↓kuno ku↑bikirita N↓desU na↓Nda e.
282
+ wav/koni_vocals_08_26_04_403.wav|to↓kuni ko↑rede i↑Qta↓Qte ko↑to o↓oiyo o↓oi.
283
+ wav/koni_vocals_08_26_04_404.wav|a↑kete hi↑wa ya↑me mi↓Qkuyo.
284
+ wav/koni_vocals_08_26_04_409.wav|i↓ito o↑mo↓uyo ka↓nari ko↑rewa ka↑renai ji↑Neeno te↑kIfa↓QʃoNnato ma↓ma.
285
+ wav/koni_vocals_08_26_04_410.wav|wa↑taʃio o↑ite i↑nai yo↓ona a↑jino ko↓iiito o↑mo↓uga a.
286
+ wav/koni_vocals_08_26_04_413.wav|i↓tsumo hI↑todaQta N↓da e↑efo↓oru e↑ito↓orudaQta no↑ka.
287
+ wav/koni_vocals_08_26_04_416.wav|ha↓i j a ma↓zu i↑toe↓e.
288
+ wav/koni_vocals_08_26_04_417.wav|hi↑rogemite ku↑ukaNni a↑nofurui i↑efu↓kuidesUne e↓N fU↑ka↓i N↓desUyo ka↓nari fU↑ku↓ni me↑ʧakUʧa i↓idesUkedo.
289
+ wav/koni_vocals_08_26_04_420.wav|ko↑rewa sU↑teru↓no.
290
+ wav/koni_vocals_08_26_04_422.wav|do↓Qka i↑ta↓ihajikeru yo↓oni su↑go↓kure ku↓uru ki↑aigaQte ko↑to↓desUka.
291
+ wav/koni_vocals_08_26_04_423.wav|su↓gu ko↓o i↑Qʧa i↑kenai.
292
+ wav/koni_vocals_08_26_04_424.wav|ko↑re↓ano bu↓NbuN bu↓NbuN bu↓NbuN to↑kUhoNbu↓NpooQte i↓u kyo↑kuga na↑gare↓ruyaQte ya↑N.
293
+ wav/koni_vocals_08_26_04_425.wav|do↓oyuutaine.
294
+ wav/koni_vocals_08_26_04_427.wav|ʧa↑oʧaoʧaote i↑Qte ko↓o ya↓Qtedeʃoo sa↑ke↓ru no↑ga sa↑N.
295
+ wav/koni_vocals_08_26_04_428.wav|kI↑kuni na↓Qta yo↓ona ba↑ʃoga.
296
+ wav/koni_vocals_08_26_04_431.wav|ko↑rewa ji↑buNga ka↑Qkoi↓iQte yo↓Qte i↑ru no↑ka.
297
+ wav/koni_vocals_08_26_04_432.wav|ji↑buNji↓ʃiN yo↓kU ka↓Qko e↓eto i↑Qte i↑ru no↑ka.
298
+ wav/koni_vocals_08_26_04_433.wav|so↑reto↓mo ko↑no ka↑eno ke↑Qkao ka↓koeto i↑Qteno ko↑to.
299
+ wav/koni_vocals_08_26_04_434.wav|to↓Qʧa N↓ni tsu↓koja.
300
+ wav/koni_vocals_08_26_04_443.wav|e↓nu ki↑ji↓saitowa mo↓iʧaN.
301
+ wav/koni_vocals_08_26_04_447.wav|de↓ruʧa N↓daʃi.
302
+ wav/koni_vocals_08_26_04_449.wav|fU↑ku↓wa i↓iyo na↓Nka ko↑no k o ʃI↑ta↓ga su↑boNde↓ru.
303
+ wav/koni_vocals_08_26_04_450.wav|a↑no pa↓NtsU ka↑Qkoi↓i a↑tarijadaQta↓atokaga ha↑ki↓soona so↑reni ʃi↑ro↓ine su↑ni↓ikaade ʃi↑ro↓ʃatsU ku↑ro↓i ja↑keQtoQ ʧi↓gono ha↑ne.
304
+ wav/koni_vocals_08_26_04_451.wav|ka↓nari i↓i te↑Nda jo↑ʃi↓ukewa ko↑oso↓obi Q↓ʧa.
305
+ wav/koni_vocals_08_26_04_452.wav|da↓kara de↑eto↓fUkuto ʃI↑tewa.
306
+ wav/koni_vocals_08_26_04_453.wav|me↑ʧakUʧate i↓tsuno ta↑kUhai.
307
+ wav/koni_vocals_08_26_04_455.wav|ko↑reka↓reʃiga ki↓te ki↓tara.
308
+ wav/koni_vocals_08_26_04_456.wav|mi↑Nna yo↑roko↓buto o↑mo↓u da↑igakU ka↑Qkoi↓ikedonaato o↑mo↓u.
309
+ wav/koni_vocals_08_26_04_457.wav|ta↑dasemase↓N ta↓dadesUne.
310
+ wav/koni_vocals_08_26_04_458.wav|a↑no ku↑reeru↓nekono ho↑ohaikuru↓unanode o↑imainasu↓teNni sa↑sete i↑tadaki o↑iʃi↓i.
311
+ wav/koni_vocals_08_26_04_459.wav|ko↑jiNto↓odesUkara ha↑idomaNte↓N a↑ge↓temo yo↓kaQtadesUkedone.
312
+ wav/koni_vocals_08_26_04_462.wav|sa↓ikuriNgunode.
313
+ wav/koni_vocals_08_26_04_464.wav|a↑no o↓beeni na↓QtedesUne a↑ka↓keNdesU fu↑gookaku↓hai.
314
+ wav/koni_vocals_08_26_04_467.wav|yo↓ona ka↑taʧide↓mo i↑i↓ʃine na↓Nka ʧa↑Nto ka↑taiga i↓i yo↓ona ka↑Njiga ʃI↑te ka↑Qkoi↓inato i↑u i↑me↓eji.
315
+ wav/koni_vocals_08_26_04_470.wav|na↑NkaʃiNpoʃi↓Npodede sa↑ikiNno.
316
+ wav/koni_vocals_08_26_04_471.wav|na↑N↓keNna ka↑Qkoi↓io to↑riireta ka↑Njio.
317
+ wav/koni_vocals_08_26_04_472.wav|i↑i ni↑ge↓desUne tsu↑gi mi↓te mi↓yoo do↓bea ʧo↑ota↓Nkino a↑no ʃi↑geNyo↓saNno ho↓owa e↑egane ko↑Qʧi↓wa i↑QʧaQte su↑mimase↓N.
318
+ wav/koni_vocals_08_26_04_473.wav|ko↑re yo↓kaQtayone ka↓too e↑ega↓hai.
319
+ wav/koni_vocals_08_26_04_474.wav|ʧo↓Qto na↓Nka.
320
+ wav/koni_vocals_08_26_04_475.wav|na↓Nde ka↑oni ko↑rega tsu↑ite↓ru no↑ga ʧo↓Qto wa↑kaN↓naidesUkedo.
321
+ wav/koni_vocals_08_26_04_476.wav|a↑rigatoage↓naito su↑umaN↓neN ʧo↓Qto ba↑raba↓rani na↓QʧaQte.
322
+ wav/koni_vocals_08_26_04_480.wav|so↑ko↓soreo hi↑Qpareo ki↑itenaiyo.
323
+ wav/koni_vocals_08_26_04_482.wav|ʧo↓Qto ma↑te↓te ma↑te↓te ma↑te↓te ma↑te↓mate yo↑itomakehaikyoo↓orajaniika.
324
+ wav/koni_vocals_08_26_04_486.wav|kyo↑oyoona N↓janaidesUka.
325
+ wav/koni_vocals_08_26_04_487.wav|ʧo↓ojo o↑na↓maeto.
326
+ wav/koni_vocals_08_26_04_488.wav|ta↓itorudesUka yo↑ogifu↓kee.
327
+ wav/koni_vocals_08_26_04_489.wav|i↑roironi tsU↑kiseki↓uN.
328
+ wav/koni_vocals_08_26_04_490.wav|ta↓ʃIkani so↑o i↑warete mi↓reba fu↑Ni↓kiwa i↓ina ma↑ta ko↓iyo i↑QʧaN.
329
+ wav/koni_vocals_08_26_04_494.wav|ko↑rega kyo↓otoka ko↑yoo↓jinarane.
330
+ wav/koni_vocals_08_26_04_496.wav|sa↑ineNʧoojuu↓gawa yo↓ku mi↓te na↑sa↓iyoQte ka↑Njide.
331
+ wav/koni_vocals_08_26_04_502.wav|ka↑Qkoi↓iyo.
332
+ wav/koni_vocals_08_26_04_503.wav|to↑a↓Qpuni ʃI↑temo ka↑Qkoi↓iQte na↓ni.
333
+ wav/koni_vocals_08_26_04_504.wav|me↑ʧakUʧa i↓i.
334
+ wav/koni_vocals_08_26_04_505.wav|na↓Nka ʧo↓Qto wa↑ka↓iwakai ko↓emota mi↓taina no↑ga ha↓iQte yo↓kunai.
335
+ wav/koni_vocals_08_26_04_506.wav|ja↓a mo↓o ko↓jiNniwa ko↓jiNniwa mo↓o da↑iuʃinawarete ʃi↑ma↓Qta a↑no ko↓rono e↑mo↓sa mi↓taina n o ka↑Njiru N↓dakedo.
336
+ wav/koni_vocals_08_26_04_507.wav|ʃI↑ka↓mo ja↓NkU ʧa↓u.
337
+ wav/koni_vocals_08_26_04_508.wav|na↓Nte o↑Nna↓noko.
338
+ wav/koni_vocals_08_26_04_509.wav|ho↓NniNno kU↑ʧini i↓kio a↑geta↓toQte.
339
+ wav/koni_vocals_08_26_04_510.wav|so↓rao o↑ome↓de na↑Nka↓soreQpoiyone su↑go↓ini go↑ki↓desUka a↑aaʧiruno↓koreni o↑kIta↓ino.
340
+ wav/koni_vocals_08_26_04_511.wav|so↑ʃIte hI↑todakejanai.
341
+ wav/koni_vocals_08_26_04_512.wav|ka↑Qkoi↓iyo.
342
+ wav/koni_vocals_08_26_04_513.wav|ni↑Qpo↓N ki↓ta N↓da e.
343
+ wav/koni_vocals_08_26_04_518.wav|kyo↓neNto wa↑ka↓N na↓i N↓dayona ki↓Nni ni↑Qpo↓Nno fU↑ku↓no bu↑raNdoQte ko↑to↓dayone.
344
+ wav/koni_vocals_08_26_04_519.wav|i↑yaa↓muzukaʃiiyo.
345
+ wav/koni_vocals_08_26_04_520.wav|wa↑kaN↓naina na↓Nka do↓oyuu fU↑ku↓o kI↑ta↓i no↑kani yo↑Qtewa.
346
+ wav/koni_vocals_08_26_04_521.wav|o↑susume de↑ki↓rukedo.
347
+ wav/koni_vocals_08_26_04_522.wav|i↑ma↓niNwa ni↑Qpo↓Nno.
gitignore (1).txt ADDED
@@ -0,0 +1,12 @@
1
+ DUMMY1
2
+ DUMMY2
3
+ DUMMY3
4
+ logs
5
+ __pycache__
6
+ .ipynb_checkpoints
7
+ .*.swp
8
+
9
+ build
10
+ *.c
11
+ monotonic_align/monotonic_align
12
+ /.vs/vits/FileContentIndex
inference.ipynb ADDED
@@ -0,0 +1,212 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "%matplotlib inline\n",
10
+ "import matplotlib.pyplot as plt\n",
11
+ "import IPython.display as ipd\n",
12
+ "\n",
13
+ "import os\n",
14
+ "import json\n",
15
+ "import math\n",
16
+ "import torch\n",
17
+ "from torch import nn\n",
18
+ "from torch.nn import functional as F\n",
19
+ "from torch.utils.data import DataLoader\n",
20
+ "\n",
21
+ "import commons\n",
22
+ "import utils\n",
23
+ "from data_utils import TextAudioLoader, TextAudioCollate, TextAudioSpeakerLoader, TextAudioSpeakerCollate\n",
24
+ "from models import SynthesizerTrn\n",
25
+ "from text.symbols import symbols\n",
26
+ "from text import text_to_sequence\n",
27
+ "\n",
28
+ "from scipy.io.wavfile import write\n",
29
+ "\n",
30
+ "\n",
31
+ "def get_text(text, hps):\n",
32
+ " text_norm = text_to_sequence(text, hps.data.text_cleaners)\n",
33
+ " if hps.data.add_blank:\n",
34
+ " text_norm = commons.intersperse(text_norm, 0)\n",
35
+ " text_norm = torch.LongTensor(text_norm)\n",
36
+ " return text_norm"
37
+ ]
38
+ },
39
+ {
40
+ "cell_type": "markdown",
41
+ "metadata": {},
42
+ "source": [
43
+ "## LJ Speech"
44
+ ]
45
+ },
46
+ {
47
+ "cell_type": "code",
48
+ "execution_count": null,
49
+ "metadata": {},
50
+ "outputs": [],
51
+ "source": [
52
+ "hps = utils.get_hparams_from_file(\"./configs/ljs_base.json\")"
53
+ ]
54
+ },
55
+ {
56
+ "cell_type": "code",
57
+ "execution_count": null,
58
+ "metadata": {},
59
+ "outputs": [],
60
+ "source": [
61
+ "net_g = SynthesizerTrn(\n",
62
+ " len(symbols),\n",
63
+ " hps.data.filter_length // 2 + 1,\n",
64
+ " hps.train.segment_size // hps.data.hop_length,\n",
65
+ " **hps.model).cuda()\n",
66
+ "_ = net_g.eval()\n",
67
+ "\n",
68
+ "_ = utils.load_checkpoint(\"/path/to/pretrained_ljs.pth\", net_g, None)"
69
+ ]
70
+ },
71
+ {
72
+ "cell_type": "code",
73
+ "execution_count": null,
74
+ "metadata": {},
75
+ "outputs": [],
76
+ "source": []
77
+ },
78
+ {
79
+ "cell_type": "code",
80
+ "execution_count": null,
81
+ "metadata": {},
82
+ "outputs": [],
83
+ "source": [
84
+ "stn_tst = get_text(\"VITS is Awesome!\", hps)\n",
85
+ "with torch.no_grad():\n",
86
+ " x_tst = stn_tst.cuda().unsqueeze(0)\n",
87
+ " x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).cuda()\n",
88
+ " audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667, noise_scale_w=0.8, length_scale=1)[0][0,0].data.cpu().float().numpy()\n",
89
+ "ipd.display(ipd.Audio(audio, rate=hps.data.sampling_rate, normalize=False))"
90
+ ]
91
+ },
92
+ {
93
+ "cell_type": "markdown",
94
+ "metadata": {},
95
+ "source": [
96
+ "## VCTK"
97
+ ]
98
+ },
99
+ {
100
+ "cell_type": "code",
101
+ "execution_count": null,
102
+ "metadata": {},
103
+ "outputs": [],
104
+ "source": [
105
+ "hps = utils.get_hparams_from_file(\"./configs/vctk_base.json\")"
106
+ ]
107
+ },
108
+ {
109
+ "cell_type": "code",
110
+ "execution_count": null,
111
+ "metadata": {},
112
+ "outputs": [],
113
+ "source": [
114
+ "net_g = SynthesizerTrn(\n",
115
+ " len(symbols),\n",
116
+ " hps.data.filter_length // 2 + 1,\n",
117
+ " hps.train.segment_size // hps.data.hop_length,\n",
118
+ " n_speakers=hps.data.n_speakers,\n",
119
+ " **hps.model).cuda()\n",
120
+ "_ = net_g.eval()\n",
121
+ "\n",
122
+ "_ = utils.load_checkpoint(\"/path/to/pretrained_vctk.pth\", net_g, None)"
123
+ ]
124
+ },
125
+ {
126
+ "cell_type": "code",
127
+ "execution_count": null,
128
+ "metadata": {},
129
+ "outputs": [],
130
+ "source": [
131
+ "stn_tst = get_text(\"VITS is Awesome!\", hps)\n",
132
+ "with torch.no_grad():\n",
133
+ " x_tst = stn_tst.cuda().unsqueeze(0)\n",
134
+ " x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).cuda()\n",
135
+ " sid = torch.LongTensor([4]).cuda()\n",
136
+ " audio = net_g.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, length_scale=1)[0][0,0].data.cpu().float().numpy()\n",
137
+ "ipd.display(ipd.Audio(audio, rate=hps.data.sampling_rate, normalize=False))"
138
+ ]
139
+ },
140
+ {
141
+ "cell_type": "markdown",
142
+ "metadata": {},
143
+ "source": [
144
+ "### Voice Conversion"
145
+ ]
146
+ },
147
+ {
148
+ "cell_type": "code",
149
+ "execution_count": null,
150
+ "metadata": {},
151
+ "outputs": [],
152
+ "source": [
153
+ "dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)\n",
154
+ "collate_fn = TextAudioSpeakerCollate()\n",
155
+ "loader = DataLoader(dataset, num_workers=8, shuffle=False,\n",
156
+ " batch_size=1, pin_memory=True,\n",
157
+ " drop_last=True, collate_fn=collate_fn)\n",
158
+ "data_list = list(loader)"
159
+ ]
160
+ },
161
+ {
162
+ "cell_type": "code",
163
+ "execution_count": null,
164
+ "metadata": {},
165
+ "outputs": [],
166
+ "source": [
167
+ "with torch.no_grad():\n",
168
+ " x, x_lengths, spec, spec_lengths, y, y_lengths, sid_src = [x.cuda() for x in data_list[0]]\n",
169
+ " sid_tgt1 = torch.LongTensor([1]).cuda()\n",
170
+ " sid_tgt2 = torch.LongTensor([2]).cuda()\n",
171
+ " sid_tgt3 = torch.LongTensor([4]).cuda()\n",
172
+ " audio1 = net_g.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt1)[0][0,0].data.cpu().float().numpy()\n",
173
+ " audio2 = net_g.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt2)[0][0,0].data.cpu().float().numpy()\n",
174
+ " audio3 = net_g.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt3)[0][0,0].data.cpu().float().numpy()\n",
175
+ "print(\"Original SID: %d\" % sid_src.item())\n",
176
+ "ipd.display(ipd.Audio(y[0].cpu().numpy(), rate=hps.data.sampling_rate, normalize=False))\n",
177
+ "print(\"Converted SID: %d\" % sid_tgt1.item())\n",
178
+ "ipd.display(ipd.Audio(audio1, rate=hps.data.sampling_rate, normalize=False))\n",
179
+ "print(\"Converted SID: %d\" % sid_tgt2.item())\n",
180
+ "ipd.display(ipd.Audio(audio2, rate=hps.data.sampling_rate, normalize=False))\n",
181
+ "print(\"Converted SID: %d\" % sid_tgt3.item())\n",
182
+ "ipd.display(ipd.Audio(audio3, rate=hps.data.sampling_rate, normalize=False))"
183
+ ]
184
+ }
185
+ ],
186
+ "metadata": {
187
+ "kernelspec": {
188
+ "display_name": "Python 3.6.9 64-bit",
189
+ "language": "python",
190
+ "name": "python3"
191
+ },
192
+ "language_info": {
193
+ "codemirror_mode": {
194
+ "name": "ipython",
195
+ "version": 3
196
+ },
197
+ "file_extension": ".py",
198
+ "mimetype": "text/x-python",
199
+ "name": "python",
200
+ "nbconvert_exporter": "python",
201
+ "pygments_lexer": "ipython3",
202
+ "version": "3.6.9"
203
+ },
204
+ "vscode": {
205
+ "interpreter": {
206
+ "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
207
+ }
208
+ }
209
+ },
210
+ "nbformat": 4,
211
+ "nbformat_minor": 4
212
+ }
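The notebook's synthesis cells all go through `net_g.infer(x_tst, x_tst_lengths, noise_scale=.667, noise_scale_w=0.8, length_scale=1)`. In VITS these are the usual sampling knobs: `noise_scale` scales the noise fed to the flow-based prior (overall expressiveness), `noise_scale_w` the noise of the stochastic duration predictor (timing variation), and `length_scale` multiplies predicted durations, so values above 1 slow the speech down. A short sketch varying only the speaking rate, reusing `net_g`, `hps` and `get_text` exactly as the notebook defines them:

import torch

stn_tst = get_text("VITS is Awesome!", hps)
with torch.no_grad():
    x_tst = stn_tst.cuda().unsqueeze(0)
    x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).cuda()
    for rate in (0.8, 1.0, 1.3):  # <1 is faster, >1 is slower
        audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667,
                            noise_scale_w=0.8, length_scale=rate)[0][0, 0]
        audio = audio.data.cpu().float().numpy()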
inference.py ADDED
@@ -0,0 +1,60 @@
1
+ import os
2
+
3
+
4
+ import json
5
+ import math
6
+ import torch
7
+ from torch import nn
8
+ from torch.nn import functional as F
9
+ from torch.utils.data import DataLoader
10
+
11
+ import commons
12
+ import utils
13
+ from data_utils import TextAudioLoader, TextAudioCollate, TextAudioSpeakerLoader, TextAudioSpeakerCollate
14
+ from models import SynthesizerTrn
15
+ from text.symbols import symbols
16
+ from text import text_to_sequence, cleaned_text_to_sequence
17
+ from text.cleaners import japanese_cleaners
18
+ from scipy.io.wavfile import write
19
+
20
+
21
+
22
+ def get_text(text, hps):
23
+ text_norm = text_to_sequence(text, hps.data.text_cleaners)
24
+ if hps.data.add_blank:
25
+ text_norm = commons.intersperse(text_norm, 0)
26
+ text_norm = torch.LongTensor(text_norm)
27
+ # print(text_norm.shape)
28
+ return text_norm
29
+
30
+ hps = utils.get_hparams_from_file("/mnt/vits_koni/configs/japanese_base.json")
31
+
32
+ net_g = SynthesizerTrn(
33
+ len(symbols),
34
+ hps.data.filter_length // 2 + 1,
35
+ hps.train.segment_size // hps.data.hop_length,
36
+ **hps.model).cuda()
37
+ _ = net_g.eval()
38
+
39
+
40
+ _ = utils.load_checkpoint("/mnt/vits_koni/MyDrive/japanese_base/G_42000.pth", net_g, None)
41
+
42
+
43
+ def tts(text):
44
+ if len(text) > 150:
45
+ return "Error: Text is too long", None
46
+ stn_tst = get_text(text, hps)
47
+
48
+ with torch.no_grad():
49
+ x_tst = stn_tst.cuda().unsqueeze(0)
50
+ x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).cuda()
51
+ # print(stn_tst.size())
52
+ audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667, noise_scale_w=0.8, length_scale=2)[0][
53
+ 0, 0].data.cpu().float().numpy()
54
+ return hps.data.sampling_rate, audio
55
+
56
+ sampling_rate, infer_audio = tts("にーまーまーすーろーぁ")
57
+ write("/mnt/vits_koni/MyDrive/japanese_base/inferwav/konitest3.wav", sampling_rate, infer_audio)
58
+ print("1")
59
+
60
+
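inference.py above is pinned to one machine: the model and input tensors are moved with `.cuda()`, and the config and checkpoint live under absolute `/mnt/vits_koni/...` paths. A sketch of a device-agnostic variant under the same imports as the script; the `device` handling is an assumption layered on top of the committed CUDA-only code (upstream VITS's `utils.load_checkpoint` typically loads to CPU first, so it should work on either device, and the paths still have to point at a real config and checkpoint):

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

net_g = SynthesizerTrn(
    len(symbols),
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    **hps.model).to(device)
_ = net_g.eval()
_ = utils.load_checkpoint("/mnt/vits_koni/MyDrive/japanese_base/G_42000.pth", net_g, None)

def tts(text):
    # Same length guard as the committed script.
    if len(text) > 150:
        return "Error: Text is too long", None
    stn_tst = get_text(text, hps)
    with torch.no_grad():
        x_tst = stn_tst.unsqueeze(0).to(device)
        x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).to(device)
        audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667,
                            noise_scale_w=0.8, length_scale=2)[0][0, 0]
    return hps.data.sampling_rate, audio.data.cpu().float().numpy()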
log.log ADDED
@@ -0,0 +1,146 @@
1
+ Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/
2
+ Collecting Cython==0.29.21
3
+ Downloading Cython-0.29.21-cp37-cp37m-manylinux1_x86_64.whl (2.0 MB)
4
+ Collecting librosa==0.8.0
5
+ Downloading librosa-0.8.0.tar.gz (183 kB)
6
+ Collecting matplotlib==3.3.1
7
+ Downloading matplotlib-3.3.1-cp37-cp37m-manylinux1_x86_64.whl (11.6 MB)
8
+ Requirement already satisfied: numpy==1.21.6 in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 4)) (1.21.6)
9
+ Collecting phonemizer==2.2.1
10
+ Downloading phonemizer-2.2.1-py3-none-any.whl (49 kB)
11
+ Collecting scipy==1.5.2
12
+ Downloading scipy-1.5.2-cp37-cp37m-manylinux1_x86_64.whl (25.9 MB)
13
+ Collecting tensorboard==2.3.0
14
+ Downloading tensorboard-2.3.0-py3-none-any.whl (6.8 MB)
15
+ Collecting torch==1.6.0
16
+ Downloading torch-1.6.0-cp37-cp37m-manylinux1_x86_64.whl (748.8 MB)
17
+ Collecting torchvision==0.7.0
18
+ Downloading torchvision-0.7.0-cp37-cp37m-manylinux1_x86_64.whl (5.9 MB)
19
+ Collecting Unidecode==1.1.1
20
+ Downloading Unidecode-1.1.1-py2.py3-none-any.whl (238 kB)
21
+ Collecting pyopenjtalk==0.2.0
22
+ Downloading pyopenjtalk-0.2.0.tar.gz (1.5 MB)
23
+ Installing build dependencies: started
24
+ Installing build dependencies: finished with status 'done'
25
+ Getting requirements to build wheel: started
26
+ Getting requirements to build wheel: finished with status 'done'
27
+ Preparing wheel metadata: started
28
+ Preparing wheel metadata: finished with status 'done'
29
+ Collecting jamo==0.4.1
30
+ Downloading jamo-0.4.1-py3-none-any.whl (9.5 kB)
31
+ Collecting pypinyin==0.44.0
32
+ Downloading pypinyin-0.44.0-py2.py3-none-any.whl (1.3 MB)
33
+ Requirement already satisfied: jieba==0.42.1 in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 14)) (0.42.1)
34
+ Requirement already satisfied: audioread>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from librosa==0.8.0->-r requirements.txt (line 2)) (2.1.9)
35
+ Requirement already satisfied: scikit-learn!=0.19.0,>=0.14.0 in /usr/local/lib/python3.7/dist-packages (from librosa==0.8.0->-r requirements.txt (line 2)) (1.0.2)
36
+ Requirement already satisfied: joblib>=0.14 in /usr/local/lib/python3.7/dist-packages (from librosa==0.8.0->-r requirements.txt (line 2)) (1.1.0)
37
+ Requirement already satisfied: decorator>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from librosa==0.8.0->-r requirements.txt (line 2)) (4.4.2)
38
+ Requirement already satisfied: resampy>=0.2.2 in /usr/local/lib/python3.7/dist-packages (from librosa==0.8.0->-r requirements.txt (line 2)) (0.4.0)
39
+ Requirement already satisfied: numba>=0.43.0 in /usr/local/lib/python3.7/dist-packages (from librosa==0.8.0->-r requirements.txt (line 2)) (0.56.0)
40
+ Requirement already satisfied: soundfile>=0.9.0 in /usr/local/lib/python3.7/dist-packages (from librosa==0.8.0->-r requirements.txt (line 2)) (0.10.3.post1)
41
+ Requirement already satisfied: pooch>=1.0 in /usr/local/lib/python3.7/dist-packages (from librosa==0.8.0->-r requirements.txt (line 2)) (1.6.0)
42
+ Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.3 in /usr/local/lib/python3.7/dist-packages (from matplotlib==3.3.1->-r requirements.txt (line 3)) (3.0.9)
43
+ Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib==3.3.1->-r requirements.txt (line 3)) (0.11.0)
44
+ Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib==3.3.1->-r requirements.txt (line 3)) (2.8.2)
45
+ Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.7/dist-packages (from matplotlib==3.3.1->-r requirements.txt (line 3)) (7.1.2)
46
+ Requirement already satisfied: certifi>=2020.06.20 in /usr/local/lib/python3.7/dist-packages (from matplotlib==3.3.1->-r requirements.txt (line 3)) (2022.6.15)
47
+ Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib==3.3.1->-r requirements.txt (line 3)) (1.4.4)
48
+ Collecting segments
49
+ Downloading segments-2.2.1-py2.py3-none-any.whl (15 kB)
50
+ Requirement already satisfied: attrs>=18.1 in /usr/local/lib/python3.7/dist-packages (from phonemizer==2.2.1->-r requirements.txt (line 5)) (22.1.0)
51
+ Requirement already satisfied: google-auth<2,>=1.6.3 in /usr/local/lib/python3.7/dist-packages (from tensorboard==2.3.0->-r requirements.txt (line 7)) (1.35.0)
52
+ Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard==2.3.0->-r requirements.txt (line 7)) (57.4.0)
53
+ Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard==2.3.0->-r requirements.txt (line 7)) (1.8.1)
54
+ Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.7/dist-packages (from tensorboard==2.3.0->-r requirements.txt (line 7)) (0.37.1)
55
+ Requirement already satisfied: grpcio>=1.24.3 in /usr/local/lib/python3.7/dist-packages (from tensorboard==2.3.0->-r requirements.txt (line 7)) (1.47.0)
56
+ Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorboard==2.3.0->-r requirements.txt (line 7)) (0.4.6)
57
+ Requirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard==2.3.0->-r requirements.txt (line 7)) (2.23.0)
58
+ Requirement already satisfied: protobuf>=3.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard==2.3.0->-r requirements.txt (line 7)) (3.17.3)
59
+ Requirement already satisfied: absl-py>=0.4 in /usr/local/lib/python3.7/dist-packages (from tensorboard==2.3.0->-r requirements.txt (line 7)) (1.2.0)
60
+ Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard==2.3.0->-r requirements.txt (line 7)) (3.4.1)
61
+ Requirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard==2.3.0->-r requirements.txt (line 7)) (1.15.0)
62
+ Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard==2.3.0->-r requirements.txt (line 7)) (1.0.1)
+ Requirement already satisfied: future in /usr/local/lib/python3.7/dist-packages (from torch==1.6.0->-r requirements.txt (line 8)) (0.16.0)
+ Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from pyopenjtalk==0.2.0->-r requirements.txt (line 11)) (4.64.0)
+ Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tensorboard==2.3.0->-r requirements.txt (line 7)) (4.9)
+ Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tensorboard==2.3.0->-r requirements.txt (line 7)) (4.2.4)
+ Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tensorboard==2.3.0->-r requirements.txt (line 7)) (0.2.8)
+ Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard==2.3.0->-r requirements.txt (line 7)) (1.3.1)
+ Requirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from kiwisolver>=1.0.1->matplotlib==3.3.1->-r requirements.txt (line 3)) (4.1.1)
+ Requirement already satisfied: importlib-metadata>=4.4 in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard==2.3.0->-r requirements.txt (line 7)) (4.12.0)
+ Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=4.4->markdown>=2.6.8->tensorboard==2.3.0->-r requirements.txt (line 7)) (3.8.1)
+ Requirement already satisfied: llvmlite<0.40,>=0.39.0dev0 in /usr/local/lib/python3.7/dist-packages (from numba>=0.43.0->librosa==0.8.0->-r requirements.txt (line 2)) (0.39.0)
+ Requirement already satisfied: appdirs>=1.3.0 in /usr/local/lib/python3.7/dist-packages (from pooch>=1.0->librosa==0.8.0->-r requirements.txt (line 2)) (1.4.4)
+ Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.7/dist-packages (from pooch>=1.0->librosa==0.8.0->-r requirements.txt (line 2)) (21.3)
+ Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tensorboard==2.3.0->-r requirements.txt (line 7)) (0.4.8)
+ Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard==2.3.0->-r requirements.txt (line 7)) (1.24.3)
+ Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard==2.3.0->-r requirements.txt (line 7)) (3.0.4)
+ Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard==2.3.0->-r requirements.txt (line 7)) (2.10)
+ Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard==2.3.0->-r requirements.txt (line 7)) (3.2.0)
+ Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn!=0.19.0,>=0.14.0->librosa==0.8.0->-r requirements.txt (line 2)) (3.1.0)
+ Requirement already satisfied: cffi>=1.0 in /usr/local/lib/python3.7/dist-packages (from soundfile>=0.9.0->librosa==0.8.0->-r requirements.txt (line 2)) (1.15.1)
+ Requirement already satisfied: pycparser in /usr/local/lib/python3.7/dist-packages (from cffi>=1.0->soundfile>=0.9.0->librosa==0.8.0->-r requirements.txt (line 2)) (2.21)
+ Collecting csvw>=1.5.6
+   Downloading csvw-3.1.1-py2.py3-none-any.whl (56 kB)
+ Requirement already satisfied: regex in /usr/local/lib/python3.7/dist-packages (from segments->phonemizer==2.2.1->-r requirements.txt (line 5)) (2022.6.2)
+ Collecting clldutils>=1.7.3
+   Downloading clldutils-3.12.0-py2.py3-none-any.whl (197 kB)
+ Requirement already satisfied: tabulate>=0.7.7 in /usr/local/lib/python3.7/dist-packages (from clldutils>=1.7.3->segments->phonemizer==2.2.1->-r requirements.txt (line 5)) (0.8.10)
+ Collecting colorlog
+   Downloading colorlog-6.6.0-py2.py3-none-any.whl (11 kB)
+ Requirement already satisfied: babel in /usr/local/lib/python3.7/dist-packages (from csvw>=1.5.6->segments->phonemizer==2.2.1->-r requirements.txt (line 5)) (2.10.3)
+ Collecting rdflib
+   Downloading rdflib-6.2.0-py3-none-any.whl (500 kB)
+ Requirement already satisfied: jsonschema in /usr/local/lib/python3.7/dist-packages (from csvw>=1.5.6->segments->phonemizer==2.2.1->-r requirements.txt (line 5)) (4.3.3)
+ Collecting colorama
+   Downloading colorama-0.4.5-py2.py3-none-any.whl (16 kB)
+ Collecting rfc3986<2
+   Downloading rfc3986-1.5.0-py2.py3-none-any.whl (31 kB)
+ Collecting language-tags
+   Downloading language_tags-1.1.0-py2.py3-none-any.whl (210 kB)
+ Collecting isodate
+   Downloading isodate-0.6.1-py2.py3-none-any.whl (41 kB)
+ Requirement already satisfied: uritemplate>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from csvw>=1.5.6->segments->phonemizer==2.2.1->-r requirements.txt (line 5)) (3.0.1)
+ Requirement already satisfied: pytz>=2015.7 in /usr/local/lib/python3.7/dist-packages (from babel->csvw>=1.5.6->segments->phonemizer==2.2.1->-r requirements.txt (line 5)) (2022.1)
+ Requirement already satisfied: pyrsistent!=0.17.0,!=0.17.1,!=0.17.2,>=0.14.0 in /usr/local/lib/python3.7/dist-packages (from jsonschema->csvw>=1.5.6->segments->phonemizer==2.2.1->-r requirements.txt (line 5)) (0.18.1)
+ Requirement already satisfied: importlib-resources>=1.4.0 in /usr/local/lib/python3.7/dist-packages (from jsonschema->csvw>=1.5.6->segments->phonemizer==2.2.1->-r requirements.txt (line 5)) (5.9.0)
+ Building wheels for collected packages: librosa, pyopenjtalk
+   Building wheel for librosa (setup.py): started
+   Building wheel for librosa (setup.py): finished with status 'done'
+   Created wheel for librosa: filename=librosa-0.8.0-py3-none-any.whl size=201396 sha256=69a746a2373b77774c1b66e31e7eba0bfedeb1d18e378aa9180fd3f7d4019e57
+   Stored in directory: /root/.cache/pip/wheels/de/1e/aa/d91797ae7e1ce11853ee100bee9d1781ae9d750e7458c95afb
+   Building wheel for pyopenjtalk (PEP 517): started
+   Building wheel for pyopenjtalk (PEP 517): finished with status 'done'
+   Created wheel for pyopenjtalk: filename=pyopenjtalk-0.2.0-cp37-cp37m-linux_x86_64.whl size=4431836 sha256=68551b95c2c9065b6654c4e04e0e1631c14d9149046836224effdb990ad77f71
+   Stored in directory: /root/.cache/pip/wheels/10/56/0e/435dc1aec0d8614a489abfc51da4fd54ff6e8b33bf978f2081
+ Successfully built librosa pyopenjtalk
+ Installing collected packages: isodate, rfc3986, rdflib, language-tags, colorama, csvw, colorlog, scipy, clldutils, torch, segments, Cython, Unidecode, torchvision, tensorboard, pypinyin, pyopenjtalk, phonemizer, matplotlib, librosa, jamo
+   Attempting uninstall: scipy
+     Found existing installation: scipy 1.7.3
+     Uninstalling scipy-1.7.3:
+       Successfully uninstalled scipy-1.7.3
+   Attempting uninstall: torch
+     Found existing installation: torch 1.12.1+cu113
+     Uninstalling torch-1.12.1+cu113:
+       Successfully uninstalled torch-1.12.1+cu113
+   Attempting uninstall: Cython
+     Found existing installation: Cython 0.29.32
+     Uninstalling Cython-0.29.32:
+       Successfully uninstalled Cython-0.29.32
+   Attempting uninstall: torchvision
+     Found existing installation: torchvision 0.13.1+cu113
+     Uninstalling torchvision-0.13.1+cu113:
+       Successfully uninstalled torchvision-0.13.1+cu113
+   Attempting uninstall: tensorboard
+     Found existing installation: tensorboard 2.8.0
+     Uninstalling tensorboard-2.8.0:
+       Successfully uninstalled tensorboard-2.8.0
+   Attempting uninstall: matplotlib
+     Found existing installation: matplotlib 3.2.2
+     Uninstalling matplotlib-3.2.2:
+       Successfully uninstalled matplotlib-3.2.2
+   Attempting uninstall: librosa
+     Found existing installation: librosa 0.8.1
+     Uninstalling librosa-0.8.1:
+       Successfully uninstalled librosa-0.8.1
+ Successfully installed Cython-0.29.21 Unidecode-1.1.1 clldutils-3.12.0 colorama-0.4.5 colorlog-6.6.0 csvw-3.1.1 isodate-0.6.1 jamo-0.4.1 language-tags-1.1.0 librosa-0.8.0 matplotlib-3.3.1 phonemizer-2.2.1 pyopenjtalk-0.2.0 pypinyin-0.44.0 rdflib-6.2.0 rfc3986-1.5.0 scipy-1.5.2 segments-2.2.1 tensorboard-2.3.0 torch-1.6.0 torchvision-0.7.0
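The log above pins an older toolchain (notably torch 1.6.0 and scipy 1.5.2 replacing the preinstalled Colab versions). A minimal post-install sanity check, assuming the packages above installed cleanly:

import torch, librosa, scipy, matplotlib

# Expected versions per the pip log above.
print(torch.__version__)       # 1.6.0
print(librosa.__version__)     # 0.8.0
print(scipy.__version__)       # 1.5.2
print(matplotlib.__version__)  # 3.3.1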
losses.py ADDED
@@ -0,0 +1,61 @@
+ import torch
+ from torch.nn import functional as F
+
+ import commons
+
+
+ def feature_loss(fmap_r, fmap_g):
+   loss = 0
+   for dr, dg in zip(fmap_r, fmap_g):
+     for rl, gl in zip(dr, dg):
+       rl = rl.float().detach()
+       gl = gl.float()
+       loss += torch.mean(torch.abs(rl - gl))
+
+   return loss * 2
+
+
+ def discriminator_loss(disc_real_outputs, disc_generated_outputs):
+   loss = 0
+   r_losses = []
+   g_losses = []
+   for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
+     dr = dr.float()
+     dg = dg.float()
+     r_loss = torch.mean((1-dr)**2)
+     g_loss = torch.mean(dg**2)
+     loss += (r_loss + g_loss)
+     r_losses.append(r_loss.item())
+     g_losses.append(g_loss.item())
+
+   return loss, r_losses, g_losses
+
+
+ def generator_loss(disc_outputs):
+   loss = 0
+   gen_losses = []
+   for dg in disc_outputs:
+     dg = dg.float()
+     l = torch.mean((1-dg)**2)
+     gen_losses.append(l)
+     loss += l
+
+   return loss, gen_losses
+
+
+ def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
+   """
+   z_p, logs_q: [b, h, t_t]
+   m_p, logs_p: [b, h, t_t]
+   """
+   z_p = z_p.float()
+   logs_q = logs_q.float()
+   m_p = m_p.float()
+   logs_p = logs_p.float()
+   z_mask = z_mask.float()
+
+   kl = logs_p - logs_q - 0.5
+   kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)
+   kl = torch.sum(kl * z_mask)
+   l = kl / torch.sum(z_mask)
+   return l
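A minimal smoke test for the three adversarial losses above, using random tensors in place of real discriminator outputs (the shapes here are illustrative, not prescribed by the code):

import torch
from losses import feature_loss, discriminator_loss, generator_loss

d_real = [torch.randn(4, 100)]        # one discriminator output, batch of 4
d_fake = [torch.randn(4, 100)]
fmap_r = [[torch.randn(4, 32, 100)]]  # one feature map per discriminator layer
fmap_g = [[torch.randn(4, 32, 100)]]

loss_d, r_losses, g_losses = discriminator_loss(d_real, d_fake)
loss_g, gen_losses = generator_loss(d_fake)
loss_fm = feature_loss(fmap_r, fmap_g)
print(loss_d.item(), loss_g.item(), loss_fm.item())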
mel_processing.py ADDED
@@ -0,0 +1,112 @@
+ import math
+ import os
+ import random
+ import torch
+ from torch import nn
+ import torch.nn.functional as F
+ import torch.utils.data
+ import numpy as np
+ import librosa
+ import librosa.util as librosa_util
+ from librosa.util import normalize, pad_center, tiny
+ from scipy.signal import get_window
+ from scipy.io.wavfile import read
+ from librosa.filters import mel as librosa_mel_fn
+
+ MAX_WAV_VALUE = 32768.0
+
+
+ def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
+   """
+   PARAMS
+   ------
+   C: compression factor
+   """
+   return torch.log(torch.clamp(x, min=clip_val) * C)
+
+
+ def dynamic_range_decompression_torch(x, C=1):
+   """
+   PARAMS
+   ------
+   C: compression factor used to compress
+   """
+   return torch.exp(x) / C
+
+
+ def spectral_normalize_torch(magnitudes):
+   output = dynamic_range_compression_torch(magnitudes)
+   return output
+
+
+ def spectral_de_normalize_torch(magnitudes):
+   output = dynamic_range_decompression_torch(magnitudes)
+   return output
+
+
+ mel_basis = {}
+ hann_window = {}
+
+
+ def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
+   if torch.min(y) < -1.:
+     print('min value is ', torch.min(y))
+   if torch.max(y) > 1.:
+     print('max value is ', torch.max(y))
+
+   global hann_window
+   dtype_device = str(y.dtype) + '_' + str(y.device)
+   wnsize_dtype_device = str(win_size) + '_' + dtype_device
+   if wnsize_dtype_device not in hann_window:
+     hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
+
+   y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
+   y = y.squeeze(1)
+
+   spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
+                     center=center, pad_mode='reflect', normalized=False, onesided=True)
+
+   spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
+   return spec
+
+
+ def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
+   global mel_basis
+   dtype_device = str(spec.dtype) + '_' + str(spec.device)
+   fmax_dtype_device = str(fmax) + '_' + dtype_device
+   if fmax_dtype_device not in mel_basis:
+     mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
+     mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
+   spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
+   spec = spectral_normalize_torch(spec)
+   return spec
+
+
+ def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
+   if torch.min(y) < -1.:
+     print('min value is ', torch.min(y))
+   if torch.max(y) > 1.:
+     print('max value is ', torch.max(y))
+
+   global mel_basis, hann_window
+   dtype_device = str(y.dtype) + '_' + str(y.device)
+   fmax_dtype_device = str(fmax) + '_' + dtype_device
+   wnsize_dtype_device = str(win_size) + '_' + dtype_device
+   if fmax_dtype_device not in mel_basis:
+     mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
+     mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
+   if wnsize_dtype_device not in hann_window:
+     hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
+
+   y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
+   y = y.squeeze(1)
+
+   spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
+                     center=center, pad_mode='reflect', normalized=False, onesided=True)
+
+   spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
+
+   spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
+   spec = spectral_normalize_torch(spec)
+
+   return spec
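An illustrative call to mel_spectrogram_torch; the STFT/mel parameters below mirror a typical 22.05 kHz VITS configuration and are assumptions here, not values read from this commit's config file:

import torch
from mel_processing import mel_spectrogram_torch

wav = torch.rand(1, 22050) * 2 - 1  # 1 s of fake audio scaled into [-1, 1]
mel = mel_spectrogram_torch(wav, n_fft=1024, num_mels=80,
                            sampling_rate=22050, hop_size=256,
                            win_size=1024, fmin=0.0, fmax=None)
print(mel.shape)  # [1, 80, n_frames]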
models.py ADDED
@@ -0,0 +1,535 @@
+ import copy
+ import math
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+
+ import commons
+ import modules
+ import attentions
+ import monotonic_align
+
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+ from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+ from commons import init_weights, get_padding
+
+
+ class StochasticDurationPredictor(nn.Module):
+   def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
+     super().__init__()
+     filter_channels = in_channels  # it needs to be removed in a future version.
+     self.in_channels = in_channels
+     self.filter_channels = filter_channels
+     self.kernel_size = kernel_size
+     self.p_dropout = p_dropout
+     self.n_flows = n_flows
+     self.gin_channels = gin_channels
+
+     self.log_flow = modules.Log()
+     self.flows = nn.ModuleList()
+     self.flows.append(modules.ElementwiseAffine(2))
+     for i in range(n_flows):
+       self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
+       self.flows.append(modules.Flip())
+
+     self.post_pre = nn.Conv1d(1, filter_channels, 1)
+     self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
+     self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
+     self.post_flows = nn.ModuleList()
+     self.post_flows.append(modules.ElementwiseAffine(2))
+     for i in range(4):
+       self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
+       self.post_flows.append(modules.Flip())
+
+     self.pre = nn.Conv1d(in_channels, filter_channels, 1)
+     self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
+     self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
+     if gin_channels != 0:
+       self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
+
+   def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
+     x = torch.detach(x)
+     x = self.pre(x)
+     if g is not None:
+       g = torch.detach(g)
+       x = x + self.cond(g)
+     x = self.convs(x, x_mask)
+     x = self.proj(x) * x_mask
+
+     if not reverse:
+       flows = self.flows
+       assert w is not None
+
+       logdet_tot_q = 0
+       h_w = self.post_pre(w)
+       h_w = self.post_convs(h_w, x_mask)
+       h_w = self.post_proj(h_w) * x_mask
+       e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
+       z_q = e_q
+       for flow in self.post_flows:
+         z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
+         logdet_tot_q += logdet_q
+       z_u, z1 = torch.split(z_q, [1, 1], 1)
+       u = torch.sigmoid(z_u) * x_mask
+       z0 = (w - u) * x_mask
+       logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
+       logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
+
+       logdet_tot = 0
+       z0, logdet = self.log_flow(z0, x_mask)
+       logdet_tot += logdet
+       z = torch.cat([z0, z1], 1)
+       for flow in flows:
+         z, logdet = flow(z, x_mask, g=x, reverse=reverse)
+         logdet_tot = logdet_tot + logdet
+       nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
+       return nll + logq  # [b]
+     else:
+       flows = list(reversed(self.flows))
+       flows = flows[:-2] + [flows[-1]]  # remove a useless vflow
+       z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
+       for flow in flows:
+         z = flow(z, x_mask, g=x, reverse=reverse)
+       z0, z1 = torch.split(z, [1, 1], 1)
+       logw = z0
+       return logw
+
+
+ class DurationPredictor(nn.Module):
+   def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
+     super().__init__()
+
+     self.in_channels = in_channels
+     self.filter_channels = filter_channels
+     self.kernel_size = kernel_size
+     self.p_dropout = p_dropout
+     self.gin_channels = gin_channels
+
+     self.drop = nn.Dropout(p_dropout)
+     self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
+     self.norm_1 = modules.LayerNorm(filter_channels)
+     self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
+     self.norm_2 = modules.LayerNorm(filter_channels)
+     self.proj = nn.Conv1d(filter_channels, 1, 1)
+
+     if gin_channels != 0:
+       self.cond = nn.Conv1d(gin_channels, in_channels, 1)
+
+   def forward(self, x, x_mask, g=None):
+     x = torch.detach(x)
+     if g is not None:
+       g = torch.detach(g)
+       x = x + self.cond(g)
+     x = self.conv_1(x * x_mask)
+     x = torch.relu(x)
+     x = self.norm_1(x)
+     x = self.drop(x)
+     x = self.conv_2(x * x_mask)
+     x = torch.relu(x)
+     x = self.norm_2(x)
+     x = self.drop(x)
+     x = self.proj(x * x_mask)
+     return x * x_mask
+
+
+ class TextEncoder(nn.Module):
+   def __init__(self,
+       n_vocab,
+       out_channels,
+       hidden_channels,
+       filter_channels,
+       n_heads,
+       n_layers,
+       kernel_size,
+       p_dropout):
+     super().__init__()
+     self.n_vocab = n_vocab
+     self.out_channels = out_channels
+     self.hidden_channels = hidden_channels
+     self.filter_channels = filter_channels
+     self.n_heads = n_heads
+     self.n_layers = n_layers
+     self.kernel_size = kernel_size
+     self.p_dropout = p_dropout
+
+     self.emb = nn.Embedding(n_vocab, hidden_channels)
+     nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
+
+     self.encoder = attentions.Encoder(
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size,
+         p_dropout)
+     self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+   def forward(self, x, x_lengths):
+     x = self.emb(x) * math.sqrt(self.hidden_channels)  # [b, t, h]
+     # print(x.shape)
+     x = torch.transpose(x, 1, -1)  # [b, h, t]
+     x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+
+     x = self.encoder(x * x_mask, x_mask)
+     stats = self.proj(x) * x_mask
+
+     m, logs = torch.split(stats, self.out_channels, dim=1)
+     return x, m, logs, x_mask
+
+
+ class ResidualCouplingBlock(nn.Module):
+   def __init__(self,
+       channels,
+       hidden_channels,
+       kernel_size,
+       dilation_rate,
+       n_layers,
+       n_flows=4,
+       gin_channels=0):
+     super().__init__()
+     self.channels = channels
+     self.hidden_channels = hidden_channels
+     self.kernel_size = kernel_size
+     self.dilation_rate = dilation_rate
+     self.n_layers = n_layers
+     self.n_flows = n_flows
+     self.gin_channels = gin_channels
+
+     self.flows = nn.ModuleList()
+     for i in range(n_flows):
+       self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
+       self.flows.append(modules.Flip())
+
+   def forward(self, x, x_mask, g=None, reverse=False):
+     if not reverse:
+       for flow in self.flows:
+         x, _ = flow(x, x_mask, g=g, reverse=reverse)
+     else:
+       for flow in reversed(self.flows):
+         x = flow(x, x_mask, g=g, reverse=reverse)
+     return x
+
+
+ class PosteriorEncoder(nn.Module):
+   def __init__(self,
+       in_channels,
+       out_channels,
+       hidden_channels,
+       kernel_size,
+       dilation_rate,
+       n_layers,
+       gin_channels=0):
+     super().__init__()
+     self.in_channels = in_channels
+     self.out_channels = out_channels
+     self.hidden_channels = hidden_channels
+     self.kernel_size = kernel_size
+     self.dilation_rate = dilation_rate
+     self.n_layers = n_layers
+     self.gin_channels = gin_channels
+
+     self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+     self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
+     self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+   def forward(self, x, x_lengths, g=None):
+     x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+     x = self.pre(x) * x_mask
+     x = self.enc(x, x_mask, g=g)
+     stats = self.proj(x) * x_mask
+     m, logs = torch.split(stats, self.out_channels, dim=1)
+     z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+     return z, m, logs, x_mask
+
+
+ class Generator(torch.nn.Module):
+   def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
+     super(Generator, self).__init__()
+     self.num_kernels = len(resblock_kernel_sizes)
+     self.num_upsamples = len(upsample_rates)
+     self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
+     resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
+
+     self.ups = nn.ModuleList()
+     for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+       self.ups.append(weight_norm(
+           ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
+                           k, u, padding=(k-u)//2)))
+
+     self.resblocks = nn.ModuleList()
+     for i in range(len(self.ups)):
+       ch = upsample_initial_channel//(2**(i+1))
+       for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
+         self.resblocks.append(resblock(ch, k, d))
+
+     self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+     self.ups.apply(init_weights)
+
+     if gin_channels != 0:
+       self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+   def forward(self, x, g=None):
+     x = self.conv_pre(x)
+     if g is not None:
+       x = x + self.cond(g)
+
+     for i in range(self.num_upsamples):
+       x = F.leaky_relu(x, modules.LRELU_SLOPE)
+       x = self.ups[i](x)
+       xs = None
+       for j in range(self.num_kernels):
+         if xs is None:
+           xs = self.resblocks[i*self.num_kernels+j](x)
+         else:
+           xs += self.resblocks[i*self.num_kernels+j](x)
+       x = xs / self.num_kernels
+     x = F.leaky_relu(x)
+     x = self.conv_post(x)
+     x = torch.tanh(x)
+
+     return x
+
+   def remove_weight_norm(self):
+     print('Removing weight norm...')
+     for l in self.ups:
+       remove_weight_norm(l)
+     for l in self.resblocks:
+       l.remove_weight_norm()
+
+
+ class DiscriminatorP(torch.nn.Module):
+   def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
+     super(DiscriminatorP, self).__init__()
+     self.period = period
+     self.use_spectral_norm = use_spectral_norm
+     norm_f = weight_norm if use_spectral_norm == False else spectral_norm
+     self.convs = nn.ModuleList([
+         norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+         norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+         norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+         norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+         norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
+     ])
+     self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
+
+   def forward(self, x):
+     fmap = []
+
+     # 1d to 2d
+     b, c, t = x.shape
+     if t % self.period != 0:  # pad first
+       n_pad = self.period - (t % self.period)
+       x = F.pad(x, (0, n_pad), "reflect")
+       t = t + n_pad
+     x = x.view(b, c, t // self.period, self.period)
+
+     for l in self.convs:
+       x = l(x)
+       x = F.leaky_relu(x, modules.LRELU_SLOPE)
+       fmap.append(x)
+     x = self.conv_post(x)
+     fmap.append(x)
+     x = torch.flatten(x, 1, -1)
+
+     return x, fmap
+
+
+ class DiscriminatorS(torch.nn.Module):
+   def __init__(self, use_spectral_norm=False):
+     super(DiscriminatorS, self).__init__()
+     norm_f = weight_norm if use_spectral_norm == False else spectral_norm
+     self.convs = nn.ModuleList([
+         norm_f(Conv1d(1, 16, 15, 1, padding=7)),
+         norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
+         norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
+         norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
+         norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
+         norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
+     ])
+     self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
+
+   def forward(self, x):
+     fmap = []
+
+     for l in self.convs:
+       x = l(x)
+       x = F.leaky_relu(x, modules.LRELU_SLOPE)
+       fmap.append(x)
+     x = self.conv_post(x)
+     fmap.append(x)
+     x = torch.flatten(x, 1, -1)
+
+     return x, fmap
+
+
+ class MultiPeriodDiscriminator(torch.nn.Module):
+   def __init__(self, use_spectral_norm=False):
+     super(MultiPeriodDiscriminator, self).__init__()
+     periods = [2,3,5,7,11]
+
+     discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+     discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
+     self.discriminators = nn.ModuleList(discs)
+
+   def forward(self, y, y_hat):
+     y_d_rs = []
+     y_d_gs = []
+     fmap_rs = []
+     fmap_gs = []
+     for i, d in enumerate(self.discriminators):
+       y_d_r, fmap_r = d(y)
+       y_d_g, fmap_g = d(y_hat)
+       y_d_rs.append(y_d_r)
+       y_d_gs.append(y_d_g)
+       fmap_rs.append(fmap_r)
+       fmap_gs.append(fmap_g)
+
+     return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+
+ class SynthesizerTrn(nn.Module):
+   """
+   Synthesizer for Training
+   """
+
+   def __init__(self,
+       n_vocab,
+       spec_channels,
+       segment_size,
+       inter_channels,
+       hidden_channels,
+       filter_channels,
+       n_heads,
+       n_layers,
+       kernel_size,
+       p_dropout,
+       resblock,
+       resblock_kernel_sizes,
+       resblock_dilation_sizes,
+       upsample_rates,
+       upsample_initial_channel,
+       upsample_kernel_sizes,
+       n_speakers=0,
+       gin_channels=0,
+       use_sdp=True,
+       **kwargs):
+
+     super().__init__()
+     self.n_vocab = n_vocab
+     self.spec_channels = spec_channels
+     self.inter_channels = inter_channels
+     self.hidden_channels = hidden_channels
+     self.filter_channels = filter_channels
+     self.n_heads = n_heads
+     self.n_layers = n_layers
+     self.kernel_size = kernel_size
+     self.p_dropout = p_dropout
+     self.resblock = resblock
+     self.resblock_kernel_sizes = resblock_kernel_sizes
+     self.resblock_dilation_sizes = resblock_dilation_sizes
+     self.upsample_rates = upsample_rates
+     self.upsample_initial_channel = upsample_initial_channel
+     self.upsample_kernel_sizes = upsample_kernel_sizes
+     self.segment_size = segment_size
+     self.n_speakers = n_speakers
+     self.gin_channels = gin_channels
+
+     self.use_sdp = use_sdp
+
+     self.enc_p = TextEncoder(n_vocab,
+         inter_channels,
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size,
+         p_dropout)
+     self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
+     self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
+     self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
+
+     if use_sdp:
+       self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
+     else:
+       self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
+
+     if n_speakers > 1:
+       self.emb_g = nn.Embedding(n_speakers, gin_channels)
+
+   def forward(self, x, x_lengths, y, y_lengths, sid=None):
+
+     x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
+     if self.n_speakers > 0:
+       g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
+     else:
+       g = None
+
+     z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+     z_p = self.flow(z, y_mask, g=g)
+
+     with torch.no_grad():
+       # negative cross-entropy
+       s_p_sq_r = torch.exp(-2 * logs_p)  # [b, d, t]
+       neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True)  # [b, 1, t_s]
+       neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r)  # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
+       neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r))  # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
+       neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True)  # [b, 1, t_s]
+       neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
+
+       attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
+       attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
+
+     w = attn.sum(2)
+     if self.use_sdp:
+       l_length = self.dp(x, x_mask, w, g=g)
+       l_length = l_length / torch.sum(x_mask)
+     else:
+       logw_ = torch.log(w + 1e-6) * x_mask
+       logw = self.dp(x, x_mask, g=g)
+       l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask)  # for averaging
+
+     # expand prior
+     m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
+     logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
+
+     z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
+     o = self.dec(z_slice, g=g)
+     return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+   def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
+     x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
+     if self.n_speakers > 0:
+       g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
+     else:
+       g = None
+
+     if self.use_sdp:
+       logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
+     else:
+       logw = self.dp(x, x_mask, g=g)
+     w = torch.exp(logw) * x_mask * length_scale
+     w_ceil = torch.ceil(w)
+     y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
+     y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
+     attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
+     attn = commons.generate_path(w_ceil, attn_mask)
+
+     m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']
+     logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']
+
+     z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
+     z = self.flow(z_p, y_mask, g=g, reverse=True)
+     o = self.dec((z * y_mask)[:,:,:max_len], g=g)
+     return o, attn, y_mask, (z, z_p, m_p, logs_p)
+
+   def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
+     assert self.n_speakers > 0, "n_speakers have to be larger than 0."
+     g_src = self.emb_g(sid_src).unsqueeze(-1)
+     g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
+     z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
+     z_p = self.flow(z, y_mask, g=g_src)
+     z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
+     o_hat = self.dec(z_hat * y_mask, g=g_tgt)
+     return o_hat, y_mask, (z, z_p, z_hat)
+
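A sketch of the voice_conversion path above, run from the repository root with the monotonic_align extension built (models.py imports attentions and monotonic_align). The hyperparameters form a small toy model for shape-checking only; they are not this commit's japanese_base.json values:

import torch
from models import SynthesizerTrn

net_g = SynthesizerTrn(
    n_vocab=40, spec_channels=513, segment_size=32,
    inter_channels=192, hidden_channels=192, filter_channels=768,
    n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.1,
    resblock='1', resblock_kernel_sizes=[3, 7, 11],
    resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    upsample_rates=[8, 8, 2, 2], upsample_initial_channel=512,
    upsample_kernel_sizes=[16, 16, 4, 4],
    n_speakers=4, gin_channels=256).eval()

spec = torch.randn(1, 513, 64)  # [b, spec_channels, t], fake linear spectrogram
lengths = torch.LongTensor([64])
sid_src, sid_tgt = torch.LongTensor([0]), torch.LongTensor([1])
with torch.no_grad():
  audio, y_mask, _ = net_g.voice_conversion(spec, lengths, sid_src, sid_tgt)
print(audio.shape)  # [1, 1, t * prod(upsample_rates)]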
modules.py ADDED
@@ -0,0 +1,390 @@
+ import copy
+ import math
+ import numpy as np
+ import scipy
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+
+ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+ from torch.nn.utils import weight_norm, remove_weight_norm
+
+ import commons
+ from commons import init_weights, get_padding
+ from transforms import piecewise_rational_quadratic_transform
+
+
+ LRELU_SLOPE = 0.1
+
+
+ class LayerNorm(nn.Module):
+   def __init__(self, channels, eps=1e-5):
+     super().__init__()
+     self.channels = channels
+     self.eps = eps
+
+     self.gamma = nn.Parameter(torch.ones(channels))
+     self.beta = nn.Parameter(torch.zeros(channels))
+
+   def forward(self, x):
+     x = x.transpose(1, -1)
+     x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+     return x.transpose(1, -1)
+
+
+ class ConvReluNorm(nn.Module):
+   def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
+     super().__init__()
+     self.in_channels = in_channels
+     self.hidden_channels = hidden_channels
+     self.out_channels = out_channels
+     self.kernel_size = kernel_size
+     self.n_layers = n_layers
+     self.p_dropout = p_dropout
+     assert n_layers > 1, "Number of layers should be larger than 1."
+
+     self.conv_layers = nn.ModuleList()
+     self.norm_layers = nn.ModuleList()
+     self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
+     self.norm_layers.append(LayerNorm(hidden_channels))
+     self.relu_drop = nn.Sequential(
+         nn.ReLU(),
+         nn.Dropout(p_dropout))
+     for _ in range(n_layers-1):
+       self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
+       self.norm_layers.append(LayerNorm(hidden_channels))
+     self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
+     self.proj.weight.data.zero_()
+     self.proj.bias.data.zero_()
+
+   def forward(self, x, x_mask):
+     x_org = x
+     for i in range(self.n_layers):
+       x = self.conv_layers[i](x * x_mask)
+       x = self.norm_layers[i](x)
+       x = self.relu_drop(x)
+     x = x_org + self.proj(x)
+     return x * x_mask
+
+
+ class DDSConv(nn.Module):
+   """
+   Dilated and Depth-Separable Convolution
+   """
+   def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
+     super().__init__()
+     self.channels = channels
+     self.kernel_size = kernel_size
+     self.n_layers = n_layers
+     self.p_dropout = p_dropout
+
+     self.drop = nn.Dropout(p_dropout)
+     self.convs_sep = nn.ModuleList()
+     self.convs_1x1 = nn.ModuleList()
+     self.norms_1 = nn.ModuleList()
+     self.norms_2 = nn.ModuleList()
+     for i in range(n_layers):
+       dilation = kernel_size ** i
+       padding = (kernel_size * dilation - dilation) // 2
+       self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
+           groups=channels, dilation=dilation, padding=padding
+       ))
+       self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
+       self.norms_1.append(LayerNorm(channels))
+       self.norms_2.append(LayerNorm(channels))
+
+   def forward(self, x, x_mask, g=None):
+     if g is not None:
+       x = x + g
+     for i in range(self.n_layers):
+       y = self.convs_sep[i](x * x_mask)
+       y = self.norms_1[i](y)
+       y = F.gelu(y)
+       y = self.convs_1x1[i](y)
+       y = self.norms_2[i](y)
+       y = F.gelu(y)
+       y = self.drop(y)
+       x = x + y
+     return x * x_mask
+
+
+ class WN(torch.nn.Module):
+   def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
+     super(WN, self).__init__()
+     assert(kernel_size % 2 == 1)
+     self.hidden_channels = hidden_channels
+     self.kernel_size = kernel_size
+     self.dilation_rate = dilation_rate
+     self.n_layers = n_layers
+     self.gin_channels = gin_channels
+     self.p_dropout = p_dropout
+
+     self.in_layers = torch.nn.ModuleList()
+     self.res_skip_layers = torch.nn.ModuleList()
+     self.drop = nn.Dropout(p_dropout)
+
+     if gin_channels != 0:
+       cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
+       self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
+
+     for i in range(n_layers):
+       dilation = dilation_rate ** i
+       padding = int((kernel_size * dilation - dilation) / 2)
+       in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
+           dilation=dilation, padding=padding)
+       in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
+       self.in_layers.append(in_layer)
+
+       # last one is not necessary
+       if i < n_layers - 1:
+         res_skip_channels = 2 * hidden_channels
+       else:
+         res_skip_channels = hidden_channels
+
+       res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
+       res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
+       self.res_skip_layers.append(res_skip_layer)
+
+   def forward(self, x, x_mask, g=None, **kwargs):
+     output = torch.zeros_like(x)
+     n_channels_tensor = torch.IntTensor([self.hidden_channels])
+
+     if g is not None:
+       g = self.cond_layer(g)
+
+     for i in range(self.n_layers):
+       x_in = self.in_layers[i](x)
+       if g is not None:
+         cond_offset = i * 2 * self.hidden_channels
+         g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
+       else:
+         g_l = torch.zeros_like(x_in)
+
+       acts = commons.fused_add_tanh_sigmoid_multiply(
+           x_in,
+           g_l,
+           n_channels_tensor)
+       acts = self.drop(acts)
+
+       res_skip_acts = self.res_skip_layers[i](acts)
+       if i < self.n_layers - 1:
+         res_acts = res_skip_acts[:,:self.hidden_channels,:]
+         x = (x + res_acts) * x_mask
+         output = output + res_skip_acts[:,self.hidden_channels:,:]
+       else:
+         output = output + res_skip_acts
+     return output * x_mask
+
+   def remove_weight_norm(self):
+     if self.gin_channels != 0:
+       torch.nn.utils.remove_weight_norm(self.cond_layer)
+     for l in self.in_layers:
+       torch.nn.utils.remove_weight_norm(l)
+     for l in self.res_skip_layers:
+       torch.nn.utils.remove_weight_norm(l)
+
+
+ class ResBlock1(torch.nn.Module):
+   def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
+     super(ResBlock1, self).__init__()
+     self.convs1 = nn.ModuleList([
+         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+             padding=get_padding(kernel_size, dilation[0]))),
+         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+             padding=get_padding(kernel_size, dilation[1]))),
+         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
+             padding=get_padding(kernel_size, dilation[2])))
+     ])
+     self.convs1.apply(init_weights)
+
+     self.convs2 = nn.ModuleList([
+         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+             padding=get_padding(kernel_size, 1))),
+         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+             padding=get_padding(kernel_size, 1))),
+         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+             padding=get_padding(kernel_size, 1)))
+     ])
+     self.convs2.apply(init_weights)
+
+   def forward(self, x, x_mask=None):
+     for c1, c2 in zip(self.convs1, self.convs2):
+       xt = F.leaky_relu(x, LRELU_SLOPE)
+       if x_mask is not None:
+         xt = xt * x_mask
+       xt = c1(xt)
+       xt = F.leaky_relu(xt, LRELU_SLOPE)
+       if x_mask is not None:
+         xt = xt * x_mask
+       xt = c2(xt)
+       x = xt + x
+     if x_mask is not None:
+       x = x * x_mask
+     return x
+
+   def remove_weight_norm(self):
+     for l in self.convs1:
+       remove_weight_norm(l)
+     for l in self.convs2:
+       remove_weight_norm(l)
+
+
+ class ResBlock2(torch.nn.Module):
+   def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
+     super(ResBlock2, self).__init__()
+     self.convs = nn.ModuleList([
+         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+             padding=get_padding(kernel_size, dilation[0]))),
+         weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+             padding=get_padding(kernel_size, dilation[1])))
+     ])
+     self.convs.apply(init_weights)
+
+   def forward(self, x, x_mask=None):
+     for c in self.convs:
+       xt = F.leaky_relu(x, LRELU_SLOPE)
+       if x_mask is not None:
+         xt = xt * x_mask
+       xt = c(xt)
+       x = xt + x
+     if x_mask is not None:
+       x = x * x_mask
+     return x
+
+   def remove_weight_norm(self):
+     for l in self.convs:
+       remove_weight_norm(l)
+
+
+ class Log(nn.Module):
+   def forward(self, x, x_mask, reverse=False, **kwargs):
+     if not reverse:
+       y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
+       logdet = torch.sum(-y, [1, 2])
+       return y, logdet
+     else:
+       x = torch.exp(x) * x_mask
+       return x
+
+
+ class Flip(nn.Module):
+   def forward(self, x, *args, reverse=False, **kwargs):
+     x = torch.flip(x, [1])
+     if not reverse:
+       logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
+       return x, logdet
+     else:
+       return x
+
+
+ class ElementwiseAffine(nn.Module):
+   def __init__(self, channels):
+     super().__init__()
+     self.channels = channels
+     self.m = nn.Parameter(torch.zeros(channels,1))
+     self.logs = nn.Parameter(torch.zeros(channels,1))
+
+   def forward(self, x, x_mask, reverse=False, **kwargs):
+     if not reverse:
+       y = self.m + torch.exp(self.logs) * x
+       y = y * x_mask
+       logdet = torch.sum(self.logs * x_mask, [1,2])
+       return y, logdet
+     else:
+       x = (x - self.m) * torch.exp(-self.logs) * x_mask
+       return x
+
+
+ class ResidualCouplingLayer(nn.Module):
+   def __init__(self,
+       channels,
+       hidden_channels,
+       kernel_size,
+       dilation_rate,
+       n_layers,
+       p_dropout=0,
+       gin_channels=0,
+       mean_only=False):
+     assert channels % 2 == 0, "channels should be divisible by 2"
+     super().__init__()
+     self.channels = channels
+     self.hidden_channels = hidden_channels
+     self.kernel_size = kernel_size
+     self.dilation_rate = dilation_rate
+     self.n_layers = n_layers
+     self.half_channels = channels // 2
+     self.mean_only = mean_only
+
+     self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
+     self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
+     self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
+     self.post.weight.data.zero_()
+     self.post.bias.data.zero_()
+
+   def forward(self, x, x_mask, g=None, reverse=False):
+     x0, x1 = torch.split(x, [self.half_channels]*2, 1)
+     h = self.pre(x0) * x_mask
+     h = self.enc(h, x_mask, g=g)
+     stats = self.post(h) * x_mask
+     if not self.mean_only:
+       m, logs = torch.split(stats, [self.half_channels]*2, 1)
+     else:
+       m = stats
+       logs = torch.zeros_like(m)
+
+     if not reverse:
+       x1 = m + x1 * torch.exp(logs) * x_mask
+       x = torch.cat([x0, x1], 1)
+       logdet = torch.sum(logs, [1,2])
+       return x, logdet
+     else:
+       x1 = (x1 - m) * torch.exp(-logs) * x_mask
+       x = torch.cat([x0, x1], 1)
+       return x
+
+
+ class ConvFlow(nn.Module):
+   def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
+     super().__init__()
+     self.in_channels = in_channels
+     self.filter_channels = filter_channels
+     self.kernel_size = kernel_size
+     self.n_layers = n_layers
+     self.num_bins = num_bins
+     self.tail_bound = tail_bound
+     self.half_channels = in_channels // 2
+
+     self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
+     self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
+     self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
+     self.proj.weight.data.zero_()
+     self.proj.bias.data.zero_()
+
+   def forward(self, x, x_mask, g=None, reverse=False):
+     x0, x1 = torch.split(x, [self.half_channels]*2, 1)
+     h = self.pre(x0)
+     h = self.convs(h, x_mask, g=g)
+     h = self.proj(h) * x_mask
+
+     b, c, t = x0.shape
+     h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, cx?, t] -> [b, c, t, ?]
+
+     unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
+     unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
+     unnormalized_derivatives = h[..., 2 * self.num_bins:]
+
+     x1, logabsdet = piecewise_rational_quadratic_transform(x1,
+         unnormalized_widths,
+         unnormalized_heights,
+         unnormalized_derivatives,
+         inverse=reverse,
+         tails='linear',
+         tail_bound=self.tail_bound
+     )
+
+     x = torch.cat([x0, x1], 1) * x_mask
+     logdet = torch.sum(logabsdet * x_mask, [1,2])
+     if not reverse:
+       return x, logdet
+     else:
+       return x
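A quick invertibility check for one of the flow layers above (ElementwiseAffine): running forward and then reverse with the same mask should recover the input exactly:

import torch
from modules import ElementwiseAffine

flow = ElementwiseAffine(2)
x = torch.randn(3, 2, 50)
mask = torch.ones(3, 1, 50)
y, logdet = flow(x, mask)            # forward pass returns y and log|det J|
x_rec = flow(y, mask, reverse=True)  # inverse pass returns only x
print(torch.allclose(x, x_rec, atol=1e-6))  # True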
monotonic_align/__init__.py ADDED
@@ -0,0 +1,20 @@
+ import numpy as np
+ import torch
+ from .monotonic_align.core import maximum_path_c
+
+
+
+ def maximum_path(neg_cent, mask):
+   """ Cython optimized version.
+   neg_cent: [b, t_t, t_s]
+   mask: [b, t_t, t_s]
+   """
+   device = neg_cent.device
+   dtype = neg_cent.dtype
+   neg_cent = neg_cent.data.cpu().numpy().astype(np.float32)
+   path = np.zeros(neg_cent.shape, dtype=np.int32)
+
+   t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32)
+   t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32)
+   maximum_path_c(path, neg_cent, t_t_max, t_s_max)
+   return torch.from_numpy(path).to(device=device, dtype=dtype)
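An illustrative call, assuming the Cython extension has been built (see monotonic_align/setup.py below); the tensor sizes are arbitrary:

import torch
from monotonic_align import maximum_path

neg_cent = torch.randn(1, 50, 20)  # [b, t_t, t_s]: 50 frames, 20 text tokens
mask = torch.ones(1, 50, 20)
path = maximum_path(neg_cent, mask)
print(int(path.sum()))             # 50: exactly one token selected per frame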
monotonic_align/__pycache__/__init__.cpython-37.pyc ADDED
Binary file (772 Bytes).
 
monotonic_align/build/temp.linux-x86_64-3.7/core.o ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7a7396b25fc8d80c9bbd39e5ad7cfd4d5b5ec95d9dc023593d6cb1abb752a21
+ size 1984928
monotonic_align/core.c ADDED
The diff for this file is too large to render.
 
monotonic_align/core.pyx ADDED
@@ -0,0 +1,42 @@
+ cimport cython
+ from cython.parallel import prange
+
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil:
+   cdef int x
+   cdef int y
+   cdef float v_prev
+   cdef float v_cur
+   cdef float tmp
+   cdef int index = t_x - 1
+
+   for y in range(t_y):
+     for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
+       if x == y:
+         v_cur = max_neg_val
+       else:
+         v_cur = value[y-1, x]
+       if x == 0:
+         if y == 0:
+           v_prev = 0.
+         else:
+           v_prev = max_neg_val
+       else:
+         v_prev = value[y-1, x-1]
+       value[y, x] += max(v_prev, v_cur)
+
+   for y in range(t_y - 1, -1, -1):
+     path[y, index] = 1
+     if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
+       index = index - 1
+
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil:
+   cdef int b = paths.shape[0]
+   cdef int i
+   for i in prange(b, nogil=True):
+     maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i])
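For readers without the compiled extension, this is a pure-NumPy transcription of the same dynamic program (slow; for study only, not used by the training code):

import numpy as np

def maximum_path_numpy(value, t_y, t_x, max_neg_val=-1e9):
  value = value.copy()
  path = np.zeros_like(value, dtype=np.int32)
  # Forward pass: accumulate the best monotonic score into value[y, x].
  for y in range(t_y):
    for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
      v_cur = max_neg_val if x == y else value[y - 1, x]
      if x == 0:
        v_prev = 0. if y == 0 else max_neg_val
      else:
        v_prev = value[y - 1, x - 1]
      value[y, x] += max(v_prev, v_cur)
  # Backward pass: trace the best path back as a 0/1 matrix.
  index = t_x - 1
  for y in range(t_y - 1, -1, -1):
    path[y, index] = 1
    if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
      index -= 1
  return path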
monotonic_align/monotonic_align/core.cpython-37m-x86_64-linux-gnu.so ADDED
Binary file (815 kB).
 
monotonic_align/setup.py ADDED
@@ -0,0 +1,9 @@
+ from distutils.core import setup
+ from Cython.Build import cythonize
+ import numpy
+
+ setup(
+   name = 'monotonic_align',
+   ext_modules = cythonize("core.pyx"),
+   include_dirs=[numpy.get_include()]
+ )
preprocess.py ADDED
@@ -0,0 +1,27 @@
+ import argparse
+ import text
+ from utils import load_filepaths_and_text
+
+ if __name__ == '__main__':
+   parser = argparse.ArgumentParser()
+   parser.add_argument("--out_extension", default="cleaned")
+   parser.add_argument("--text_index", default=1, type=int)
+   parser.add_argument("--filelists", nargs="+", default=["/mnt/vits_koni/filelists/koni_vocals_text_val_filelist.txt"])
+   parser.add_argument("--text_cleaners", nargs="+", default=["japanese_cleaners"])
+
+   args = parser.parse_args()
+
+
+   for filelist in args.filelists:
+     print("START:", filelist)
+     filepaths_and_text = load_filepaths_and_text(filelist)
+     for i in range(len(filepaths_and_text)):
+       original_text = filepaths_and_text[i][args.text_index]
+       cleaned_text = text._clean_text(original_text, args.text_cleaners)
+       filepaths_and_text[i][args.text_index] = cleaned_text
+
+     new_filelist = filelist + "." + args.out_extension
+     with open(new_filelist, "w", encoding="utf-8") as f:
+       f.writelines(["|".join(x) + "\n" for x in filepaths_and_text])
+
+   print('1')
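A hypothetical invocation of the script; the filelist path is illustrative, not one shipped in this commit:

import subprocess

subprocess.run(['python', 'preprocess.py',
                '--filelists', 'filelists/example_val_filelist.txt',
                '--text_cleaners', 'japanese_cleaners'], check=True)
# Writes filelists/example_val_filelist.txt.cleaned next to the input file.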
requirements (1).txt ADDED
@@ -0,0 +1,14 @@
+ Cython==0.29.21
+ librosa==0.8.0
+ matplotlib==3.3.1
+ numpy==1.21.6
+ phonemizer==2.2.1
+ scipy==1.5.2
+ tensorboard==2.3.0
+ torch==1.6.0
+ torchvision==0.7.0
+ Unidecode==1.1.1
+ pyopenjtalk==0.2.0
+ jamo==0.4.1
+ pypinyin==0.44.0
+ jieba==0.42.1
text/LICENSE.txt ADDED
@@ -0,0 +1,19 @@
+ Copyright (c) 2017 Keith Ito
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
text/__init__.py ADDED
@@ -0,0 +1,56 @@
+ """ from https://github.com/keithito/tacotron """
+ from text import cleaners
+ from text.symbols import symbols
+
+
+ # Mappings from symbol to numeric ID and vice versa:
+ _symbol_to_id = {s: i for i, s in enumerate(symbols)}
+ _id_to_symbol = {i: s for i, s in enumerate(symbols)}
+
+
+ def text_to_sequence(text, cleaner_names):
+   '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
+     Args:
+       text: string to convert to a sequence
+       cleaner_names: names of the cleaner functions to run the text through
+     Returns:
+       List of integers corresponding to the symbols in the text
+   '''
+   sequence = []
+
+   clean_text = _clean_text(text, cleaner_names)
+   for symbol in clean_text:
+     if symbol not in _symbol_to_id.keys():
+       continue
+     symbol_id = _symbol_to_id[symbol]
+     sequence += [symbol_id]
+   return sequence
+
+
+ def cleaned_text_to_sequence(cleaned_text):
+   '''Converts a string of already-cleaned text to a sequence of IDs corresponding to the symbols in the text.
+     Args:
+       cleaned_text: string to convert to a sequence
+     Returns:
+       List of integers corresponding to the symbols in the text
+   '''
+   sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()]
+   return sequence
+
+
+ def sequence_to_text(sequence):
+   '''Converts a sequence of IDs back to a string'''
+   result = ''
+   for symbol_id in sequence:
+     s = _id_to_symbol[symbol_id]
+     result += s
+   return result
+
+
+ def _clean_text(text, cleaner_names):
+   for name in cleaner_names:
+     cleaner = getattr(cleaners, name)
+     if not cleaner:
+       raise Exception('Unknown cleaner: %s' % name)
+     text = cleaner(text)
+   return text
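A round-trip sketch, assuming the japanese_cleaners defined in text/cleaners.py and its pyopenjtalk dependency are available; the input string is illustrative:

from text import text_to_sequence, sequence_to_text

seq = text_to_sequence('こんにちは。', ['japanese_cleaners'])
print(seq)                    # symbol ids (unknown symbols are skipped)
print(sequence_to_text(seq))  # the cleaned text reassembled from the ids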
text/__pycache__/__init__.cpython-37.pyc ADDED
Binary file (2.1 kB).
 
text/__pycache__/cleaners.cpython-37.pyc ADDED
Binary file (8.76 kB).
 
text/__pycache__/symbols.cpython-37.pyc ADDED
Binary file (364 Bytes).
 
text/cleaners.py ADDED
@@ -0,0 +1,333 @@
+ """ from https://github.com/keithito/tacotron """
+
+ '''
+ Cleaners are transformations that run over the input text at both training and eval time.
+
+ Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
+ hyperparameter. Some cleaners are English-specific. You'll typically want to use:
+   1. "english_cleaners" for English text
+   2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
+      the Unidecode library (https://pypi.python.org/pypi/Unidecode)
+   3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
+      the symbols in symbols.py to match your data).
+ '''
+
+ import re
+ from unidecode import unidecode
+ import jieba
+ import pyopenjtalk
+ from jamo import h2j, j2hcj
+ from pypinyin import lazy_pinyin, BOPOMOFO
+
+
+
+ # This is a list of Korean classifiers preceded by pure Korean numerals.
+ _korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
+
+ # Regular expression matching whitespace:
+ _whitespace_re = re.compile(r'\s+')
+
+ # Regular expression matching Japanese without punctuation marks:
+ _japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
+
+ # Regular expression matching non-Japanese characters or punctuation marks:
+ _japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
+
+ # List of (regular expression, replacement) pairs for abbreviations:
+ _abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
+   ('mrs', 'misess'),
+   ('mr', 'mister'),
+   ('dr', 'doctor'),
+   ('st', 'saint'),
+   ('co', 'company'),
+   ('jr', 'junior'),
+   ('maj', 'major'),
+   ('gen', 'general'),
+   ('drs', 'doctors'),
+   ('rev', 'reverend'),
+   ('lt', 'lieutenant'),
+   ('hon', 'honorable'),
+   ('sgt', 'sergeant'),
+   ('capt', 'captain'),
+   ('esq', 'esquire'),
+   ('ltd', 'limited'),
+   ('col', 'colonel'),
+   ('ft', 'fort'),
+ ]]
+
+ # List of (hangul, hangul divided) pairs:
+ _hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
+   ('ㄳ', 'ㄱㅅ'),
+   ('ㄵ', 'ㄴㅈ'),
+   ('ㄶ', 'ㄴㅎ'),
+   ('ㄺ', 'ㄹㄱ'),
+   ('ㄻ', 'ㄹㅁ'),
+   ('ㄼ', 'ㄹㅂ'),
+   ('ㄽ', 'ㄹㅅ'),
+   ('ㄾ', 'ㄹㅌ'),
+   ('ㄿ', 'ㄹㅍ'),
+   ('ㅀ', 'ㄹㅎ'),
+   ('ㅄ', 'ㅂㅅ'),
+   ('ㅘ', 'ㅗㅏ'),
+   ('ㅙ', 'ㅗㅐ'),
+   ('ㅚ', 'ㅗㅣ'),
+   ('ㅝ', 'ㅜㅓ'),
+   ('ㅞ', 'ㅜㅔ'),
+   ('ㅟ', 'ㅜㅣ'),
+   ('ㅢ', 'ㅡㅣ'),
+   ('ㅑ', 'ㅣㅏ'),
+   ('ㅒ', 'ㅣㅐ'),
+   ('ㅕ', 'ㅣㅓ'),
+   ('ㅖ', 'ㅣㅔ'),
+   ('ㅛ', 'ㅣㅗ'),
+   ('ㅠ', 'ㅣㅜ')
+ ]]
+
+ # List of (Latin alphabet, hangul) pairs:
+ _latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
+   ('a', '에이'),
+   ('b', '비'),
+   ('c', '시'),
+   ('d', '디'),
+   ('e', '이'),
+   ('f', '에프'),
+   ('g', '지'),
+   ('h', '에이치'),
+   ('i', '아이'),
+   ('j', '제이'),
+   ('k', '케이'),
+   ('l', '엘'),
+   ('m', '엠'),
+   ('n', '엔'),
+   ('o', '오'),
+   ('p', '피'),
+   ('q', '큐'),
+   ('r', '아르'),
+   ('s', '에스'),
+   ('t', '티'),
+   ('u', '유'),
+   ('v', '브이'),
+   ('w', '더블유'),
+   ('x', '엑스'),
+   ('y', '와이'),
+   ('z', '제트')
+ ]]
+
+
+ def expand_abbreviations(text):
+   for regex, replacement in _abbreviations:
+     text = re.sub(regex, replacement, text)
+   return text
+
+
+ def lowercase(text):
+   return text.lower()
+
+
+ def collapse_whitespace(text):
+   return re.sub(_whitespace_re, ' ', text)
+
+
+ def convert_to_ascii(text):
+   return unidecode(text)
+
+
+ def latin_to_hangul(text):
+   for regex, replacement in _latin_to_hangul:
+     text = re.sub(regex, replacement, text)
+   return text
+
+
+ def divide_hangul(text):
+   for regex, replacement in _hangul_divided:
+     text = re.sub(regex, replacement, text)
+   return text
+
+
+ def hangul_number(num, sino=True):
+   '''Reference https://github.com/Kyubyong/g2pK'''
+   num = re.sub(',', '', num)
+
+   if num == '0':
+     return '영'
+   if not sino and num == '20':
+     return '스무'
+
+   digits = '123456789'
+   names = '일이삼사오육칠팔구'
+   digit2name = {d: n for d, n in zip(digits, names)}
+
+   modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉'
+   decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔'
+   digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
+   digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}
+
+   spelledout = []
+   for i, digit in enumerate(num):
+     i = len(num) - i - 1
+     if sino:
+       if i == 0:
+         name = digit2name.get(digit, '')
+       elif i == 1:
+         name = digit2name.get(digit, '') + '십'
+         name = name.replace('일십', '십')
+     else:
+       if i == 0:
+         name = digit2mod.get(digit, '')
+       elif i == 1:
+         name = digit2dec.get(digit, '')
+     if digit == '0':
+       if i % 4 == 0:
+         last_three = spelledout[-min(3, len(spelledout)):]
+         if ''.join(last_three) == '':
+           spelledout.append('')
+           continue
+       else:
+         spelledout.append('')
+         continue
+     if i == 2:
+       name = digit2name.get(digit, '') + '백'
+       name = name.replace('일백', '백')
+     elif i == 3:
+       name = digit2name.get(digit, '') + '천'
+       name = name.replace('일천', '천')
+     elif i == 4:
+       name = digit2name.get(digit, '') + '만'
+       name = name.replace('일만', '만')
+     elif i == 5:
+       name = digit2name.get(digit, '') + '십'
+       name = name.replace('일십', '십')
200
+ elif i == 6:
201
+ name = digit2name.get(digit, '') + '백'
202
+ name = name.replace('일백', '백')
203
+ elif i == 7:
204
+ name = digit2name.get(digit, '') + '천'
205
+ name = name.replace('일천', '천')
206
+ elif i == 8:
207
+ name = digit2name.get(digit, '') + '억'
208
+ elif i == 9:
209
+ name = digit2name.get(digit, '') + '십'
210
+ elif i == 10:
211
+ name = digit2name.get(digit, '') + '백'
212
+ elif i == 11:
213
+ name = digit2name.get(digit, '') + '천'
214
+ elif i == 12:
215
+ name = digit2name.get(digit, '') + '조'
216
+ elif i == 13:
217
+ name = digit2name.get(digit, '') + '십'
218
+ elif i == 14:
219
+ name = digit2name.get(digit, '') + '백'
220
+ elif i == 15:
221
+ name = digit2name.get(digit, '') + '천'
222
+ spelledout.append(name)
223
+ return ''.join(elem for elem in spelledout)
224
+
225
+
226
+ def number_to_hangul(text):
227
+ '''Reference https://github.com/Kyubyong/g2pK'''
228
+ tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text))
229
+ for token in tokens:
230
+ num, classifier = token
231
+ if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:
232
+ spelledout = hangul_number(num, sino=False)
233
+ else:
234
+ spelledout = hangul_number(num, sino=True)
235
+ text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}')
236
+ # digit by digit for remaining digits
237
+ digits = '0123456789'
238
+ names = '영일이삼사오육칠팔구'
239
+ for d, n in zip(digits, names):
240
+ text = text.replace(d, n)
241
+ return text
242
+
243
+
244
+ def basic_cleaners(text):
245
+ '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
246
+ text = lowercase(text)
247
+ text = collapse_whitespace(text)
248
+ return text
249
+
250
+
251
+ def transliteration_cleaners(text):
252
+ '''Pipeline for non-English text that transliterates to ASCII.'''
253
+ text = convert_to_ascii(text)
254
+ text = lowercase(text)
255
+ text = collapse_whitespace(text)
256
+ return text
257
+
258
+
259
+ def japanese_cleaners(text):
260
+ '''Pipeline for notating accent in Japanese text.
261
+ Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
262
+ sentences = re.split(_japanese_marks, text)
263
+ marks = re.findall(_japanese_marks, text)
264
+ text = ''
265
+ for i, sentence in enumerate(sentences):
266
+ if re.match(_japanese_characters, sentence):
267
+ if text!='':
268
+ text+=' '
269
+ labels = pyopenjtalk.extract_fullcontext(sentence)
270
+ for n, label in enumerate(labels):
271
+ phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
272
+ if phoneme not in ['sil','pau']:
273
+ text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q')
274
+ else:
275
+ continue
276
+ n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
277
+ a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
278
+ a2 = int(re.search(r"\+(\d+)\+", label).group(1))
279
+ a3 = int(re.search(r"\+(\d+)/", label).group(1))
280
+ if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']:
281
+ a2_next=-1
282
+ else:
283
+ a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
284
+ # Accent phrase boundary
285
+ if a3 == 1 and a2_next == 1:
286
+ text += ' '
287
+ # Falling
288
+ elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras:
289
+ text += '↓'
290
+ # Rising
291
+ elif a2 == 1 and a2_next == 2:
292
+ text += '↑'
293
+ if i<len(marks):
294
+ text += unidecode(marks[i]).replace(' ','')
295
+ if text and re.match('[A-Za-z]', text[-1]):  # guard: text may be empty
296
+ text += '.'
297
+ return text
298
+
299
+
300
+ def japanese_cleaners2(text):
301
+ return japanese_cleaners(text).replace('ts','ʦ').replace('...','…')
302
+
303
+
304
+ def korean_cleaners(text):
305
+ '''Pipeline for Korean text'''
306
+ text = latin_to_hangul(text)
307
+ text = number_to_hangul(text)
308
+ text = j2hcj(h2j(text))
309
+ text = divide_hangul(text)
310
+ if text and re.match('[\u3131-\u3163]', text[-1]):  # guard: text may be empty
311
+ text += '.'
312
+ return text
313
+
314
+
315
+ def chinese_cleaners(text):
316
+ '''Pipeline for Chinese text'''
317
+ text=text.replace('、',',').replace(';',',').replace(':',',')
318
+ words=jieba.lcut(text,cut_all=False)
319
+ text=''
320
+ for word in words:
321
+ bopomofos=lazy_pinyin(word,BOPOMOFO)
322
+ if not re.search('[\u4e00-\u9fff]',word):
323
+ text+=word
324
+ continue
325
+ for i in range(len(bopomofos)):
326
+ if re.match('[\u3105-\u3129]',bopomofos[i][-1]):
327
+ bopomofos[i]+='ˉ'
328
+ if text!='':
329
+ text+=' '
330
+ text+=''.join(bopomofos)
331
+ if text and re.match('[ˉˊˇˋ˙]', text[-1]):  # guard: text may be empty
332
+ text += '。'
333
+ return text
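A quick sanity check of the Japanese pipeline above (a sketch; the exact output depends on the pyopenjtalk dictionary):

from text.cleaners import japanese_cleaners

print(japanese_cleaners('こんにちは。'))
# e.g. something like 'koNniʧiwa.' -- phonemes with ch/sh/cl rewritten to ʧ/ʃ/Q,
# spaces at accent-phrase boundaries, and ↑/↓ marking pitch rises and falls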
text/symbols.py ADDED
@@ -0,0 +1,33 @@
1
+ '''
2
+ Defines the set of symbols used in text input to the model.
3
+ '''
4
+
5
+ # japanese_cleaners
6
+ _pad = '_'
7
+ _punctuation = ',.!?-'
8
+ _letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ '
9
+
10
+
11
+ # # japanese_cleaners2
12
+ # _pad = '_'
13
+ # _punctuation = ',.!?-~…'
14
+ # _letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ '
15
+
16
+
17
+ '''# korean_cleaners
18
+ _pad = '_'
19
+ _punctuation = ',.!?…~'
20
+ _letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ '
21
+ '''
22
+
23
+ '''# chinese_cleaners
24
+ _pad = '_'
25
+ _punctuation = ',。!?—…'
26
+ _letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ '
27
+ '''
28
+
29
+ # Export all symbols:
30
+ symbols = [_pad] + list(_punctuation) + list(_letters)
31
+
32
+ # Special symbol ids
33
+ SPACE_ID = symbols.index(" ")
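For reference, the lookup tables that sequence_to_text in text/__init__.py relies on are plain dicts keyed by position in this list (a sketch):

from text.symbols import symbols

_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
assert _id_to_symbol[0] == '_'  # id 0 is the pad symbol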
train.py ADDED
@@ -0,0 +1,300 @@
1
+ import os
2
+ import json
3
+ import argparse
4
+ import itertools
5
+ import math
6
+ import torch
7
+ from torch import nn, optim
8
+ from torch.nn import functional as F
9
+ from torch.utils.data import DataLoader
10
+ from torch.utils.tensorboard import SummaryWriter
11
+ import torch.multiprocessing as mp
12
+ import torch.distributed as dist
13
+ from torch.nn.parallel import DistributedDataParallel as DDP
14
+ from torch.cuda.amp import autocast, GradScaler
15
+
16
+ import librosa
17
+ import logging
18
+
19
+ logging.getLogger('numba').setLevel(logging.WARNING)
20
+
21
+ import commons
22
+ import utils
23
+ from data_utils import (
24
+ TextAudioLoader,
25
+ TextAudioCollate,
26
+ DistributedBucketSampler
27
+ )
28
+ from models import (
29
+ SynthesizerTrn,
30
+ MultiPeriodDiscriminator,
31
+ )
32
+ from losses import (
33
+ generator_loss,
34
+ discriminator_loss,
35
+ feature_loss,
36
+ kl_loss
37
+ )
38
+ from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
39
+ from text.symbols import symbols
40
+
41
+
42
+ torch.backends.cudnn.benchmark = True
43
+ global_step = 0
44
+
45
+
46
+ def main():
47
+ """Assume Single Node Multi GPUs Training Only"""
48
+ assert torch.cuda.is_available(), "CPU training is not allowed."
49
+
50
+ n_gpus = torch.cuda.device_count()
51
+ os.environ['MASTER_ADDR'] = 'localhost'
52
+ os.environ['MASTER_PORT'] = '8000'  # must be a valid TCP port (<= 65535); the original '80000' is out of range
53
+
54
+ hps = utils.get_hparams()
55
+ mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
56
+
57
+
58
+ def run(rank, n_gpus, hps):
59
+ global global_step
60
+ if rank == 0:
61
+ logger = utils.get_logger(hps.model_dir)
62
+ logger.info(hps)
63
+ utils.check_git_hash(hps.model_dir)
64
+ writer = SummaryWriter(log_dir=hps.model_dir)
65
+ writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
66
+
67
+ dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank)
68
+ torch.manual_seed(hps.train.seed)
69
+ torch.cuda.set_device(rank)
70
+
71
+ train_dataset = TextAudioLoader(hps.data.training_files, hps.data)
72
+ train_sampler = DistributedBucketSampler(
73
+ train_dataset,
74
+ hps.train.batch_size,
75
+ [32,300,400,500,600,700,800,900,1000],
76
+ num_replicas=n_gpus,
77
+ rank=rank,
78
+ shuffle=True)
79
+ collate_fn = TextAudioCollate()
80
+ train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True,
81
+ collate_fn=collate_fn, batch_sampler=train_sampler)
82
+ if rank == 0:
83
+ eval_dataset = TextAudioLoader(hps.data.validation_files, hps.data)
84
+ eval_loader = DataLoader(eval_dataset, num_workers=8, shuffle=False,
85
+ batch_size=hps.train.batch_size, pin_memory=True,
86
+ drop_last=False, collate_fn=collate_fn)
87
+
88
+ net_g = SynthesizerTrn(
89
+ len(symbols),
90
+ hps.data.filter_length // 2 + 1,
91
+ hps.train.segment_size // hps.data.hop_length,
92
+ **hps.model).cuda(rank)
93
+ net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
94
+ optim_g = torch.optim.AdamW(
95
+ net_g.parameters(),
96
+ hps.train.learning_rate,
97
+ betas=hps.train.betas,
98
+ eps=hps.train.eps)
99
+ optim_d = torch.optim.AdamW(
100
+ net_d.parameters(),
101
+ hps.train.learning_rate,
102
+ betas=hps.train.betas,
103
+ eps=hps.train.eps)
104
+ #net_g = DDP(net_g, device_ids=[rank])
105
+ #net_d = DDP(net_d, device_ids=[rank])
106
+
107
+ net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True)
108
+ net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True)
109
+
110
+
111
+ try:
112
+ _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g)
113
+ _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d)
114
+ global_step = (epoch_str - 1) * len(train_loader)
115
+ except Exception:  # no checkpoint found; start from scratch
116
+ epoch_str = 1
117
+ global_step = 0
118
+
119
+ scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
120
+ scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
121
+
122
+ scaler = GradScaler(enabled=hps.train.fp16_run)
123
+
124
+ for epoch in range(epoch_str, hps.train.epochs + 1):
125
+ if rank==0:
126
+ train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
127
+ else:
128
+ train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None)
129
+ scheduler_g.step()
130
+ scheduler_d.step()
131
+
132
+
133
+ def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
134
+ net_g, net_d = nets
135
+ optim_g, optim_d = optims
136
+ scheduler_g, scheduler_d = schedulers
137
+ train_loader, eval_loader = loaders
138
+ if writers is not None:
139
+ writer, writer_eval = writers
140
+
141
+ train_loader.batch_sampler.set_epoch(epoch)
142
+ global global_step
143
+
144
+ net_g.train()
145
+ net_d.train()
146
+ for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths) in enumerate(train_loader):
147
+ x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
148
+ spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
149
+ y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
150
+
151
+ with autocast(enabled=hps.train.fp16_run):
152
+ y_hat, l_length, attn, ids_slice, x_mask, z_mask,\
153
+ (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(x, x_lengths, spec, spec_lengths)
154
+
155
+ mel = spec_to_mel_torch(
156
+ spec,
157
+ hps.data.filter_length,
158
+ hps.data.n_mel_channels,
159
+ hps.data.sampling_rate,
160
+ hps.data.mel_fmin,
161
+ hps.data.mel_fmax)
162
+ y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
163
+ y_hat_mel = mel_spectrogram_torch(
164
+ y_hat.squeeze(1),
165
+ hps.data.filter_length,
166
+ hps.data.n_mel_channels,
167
+ hps.data.sampling_rate,
168
+ hps.data.hop_length,
169
+ hps.data.win_length,
170
+ hps.data.mel_fmin,
171
+ hps.data.mel_fmax
172
+ )
173
+
174
+ y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice
175
+
176
+ # Discriminator
177
+ y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
178
+ with autocast(enabled=False):
179
+ loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
180
+ loss_disc_all = loss_disc
181
+ optim_d.zero_grad()
182
+ scaler.scale(loss_disc_all).backward()
183
+ scaler.unscale_(optim_d)
184
+ grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
185
+ scaler.step(optim_d)
186
+
187
+ with autocast(enabled=hps.train.fp16_run):
188
+ # Generator
189
+ y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
190
+ with autocast(enabled=False):
191
+ loss_dur = torch.sum(l_length.float())
192
+ loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
193
+ loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
194
+
195
+ loss_fm = feature_loss(fmap_r, fmap_g)
196
+ loss_gen, losses_gen = generator_loss(y_d_hat_g)
197
+ loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
198
+ optim_g.zero_grad()
199
+ scaler.scale(loss_gen_all).backward()
200
+ scaler.unscale_(optim_g)
201
+ grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
202
+ scaler.step(optim_g)
203
+ scaler.update()
204
+
205
+ if rank==0:
206
+ if global_step % hps.train.log_interval == 0:
207
+ lr = optim_g.param_groups[0]['lr']
208
+ losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl]
209
+ logger.info('Train Epoch: {} [{:.0f}%]'.format(
210
+ epoch,
211
+ 100. * batch_idx / len(train_loader)))
212
+ logger.info([x.item() for x in losses] + [global_step, lr])
213
+
214
+ scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
215
+ scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl})
216
+
217
+ scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
218
+ scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
219
+ scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
220
+ image_dict = {
221
+ "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
222
+ "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
223
+ "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
224
+ "all/attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy())
225
+ }
226
+ utils.summarize(
227
+ writer=writer,
228
+ global_step=global_step,
229
+ images=image_dict,
230
+ scalars=scalar_dict)
231
+
232
+ if global_step % hps.train.eval_interval == 0:
233
+ evaluate(hps, net_g, eval_loader, writer_eval)
234
+ utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
235
+ utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
236
+ global_step += 1
237
+
238
+ if rank == 0:
239
+ logger.info('====> Epoch: {}'.format(epoch))
240
+
241
+
242
+ def evaluate(hps, generator, eval_loader, writer_eval):
243
+ generator.eval()
244
+ with torch.no_grad():
245
+ for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths) in enumerate(eval_loader):
246
+ x, x_lengths = x.cuda(0), x_lengths.cuda(0)
247
+ spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0)
248
+ y, y_lengths = y.cuda(0), y_lengths.cuda(0)
249
+
250
+ # remove else
251
+ x = x[:1]
252
+ x_lengths = x_lengths[:1]
253
+ spec = spec[:1]
254
+ spec_lengths = spec_lengths[:1]
255
+ y = y[:1]
256
+ y_lengths = y_lengths[:1]
257
+ break
258
+ y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, max_len=1000)
259
+ y_hat_lengths = mask.sum([1,2]).long() * hps.data.hop_length
260
+
261
+ mel = spec_to_mel_torch(
262
+ spec,
263
+ hps.data.filter_length,
264
+ hps.data.n_mel_channels,
265
+ hps.data.sampling_rate,
266
+ hps.data.mel_fmin,
267
+ hps.data.mel_fmax)
268
+ y_hat_mel = mel_spectrogram_torch(
269
+ y_hat.squeeze(1).float(),
270
+ hps.data.filter_length,
271
+ hps.data.n_mel_channels,
272
+ hps.data.sampling_rate,
273
+ hps.data.hop_length,
274
+ hps.data.win_length,
275
+ hps.data.mel_fmin,
276
+ hps.data.mel_fmax
277
+ )
278
+ image_dict = {
279
+ "gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy())
280
+ }
281
+ audio_dict = {
282
+ "gen/audio": y_hat[0,:,:y_hat_lengths[0]]
283
+ }
284
+ if global_step == 0:
285
+ image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())})
286
+ audio_dict.update({"gt/audio": y[0,:,:y_lengths[0]]})
287
+
288
+ utils.summarize(
289
+ writer=writer_eval,
290
+ global_step=global_step,
291
+ images=image_dict,
292
+ audios=audio_dict,
293
+ audio_sampling_rate=hps.data.sampling_rate
294
+ )
295
+ generator.train()
296
+
297
+
298
+ if __name__ == "__main__":
299
+ main()
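Training is launched directly from the command line, e.g. (the -c and -m flags are defined in utils.get_hparams later in this commit; both have hard-coded defaults pointing under /tts_koni, so adjust the paths for your setup):

python train.py -c configs/japanese_base.json -m japanese_base

main() then calls mp.spawn(run, nprocs=n_gpus, ...), forking one run(rank, ...) process per visible GPU.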
train_ms.py ADDED
@@ -0,0 +1,299 @@
1
+ import os
2
+ import json
3
+ import argparse
4
+ import itertools
5
+ import math
6
+ import torch
7
+ from torch import nn, optim
8
+ from torch.nn import functional as F
9
+ from torch.utils.data import DataLoader
10
+ from torch.utils.tensorboard import SummaryWriter
11
+ import torch.multiprocessing as mp
12
+ import torch.distributed as dist
13
+ from torch.nn.parallel import DistributedDataParallel as DDP
14
+ from torch.cuda.amp import autocast, GradScaler
15
+
16
+ import librosa
17
+ import logging
18
+
19
+ logging.getLogger('numba').setLevel(logging.WARNING)
20
+
21
+ import commons
22
+ import utils
23
+ from data_utils import (
24
+ TextAudioSpeakerLoader,
25
+ TextAudioSpeakerCollate,
26
+ DistributedBucketSampler
27
+ )
28
+ from models import (
29
+ SynthesizerTrn,
30
+ MultiPeriodDiscriminator,
31
+ )
32
+ from losses import (
33
+ generator_loss,
34
+ discriminator_loss,
35
+ feature_loss,
36
+ kl_loss
37
+ )
38
+ from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
39
+ from text.symbols import symbols
40
+
41
+
42
+ torch.backends.cudnn.benchmark = True
43
+ global_step = 0
44
+
45
+
46
+ def main():
47
+ """Assume Single Node Multi GPUs Training Only"""
48
+ assert torch.cuda.is_available(), "CPU training is not allowed."
49
+
50
+ n_gpus = torch.cuda.device_count()
51
+ os.environ['MASTER_ADDR'] = 'localhost'
52
+ os.environ['MASTER_PORT'] = '8000'  # must be a valid TCP port (<= 65535); the original '80000' is out of range
53
+
54
+ hps = utils.get_hparams()
55
+ mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
56
+
57
+
58
+ def run(rank, n_gpus, hps):
59
+ global global_step
60
+ if rank == 0:
61
+ logger = utils.get_logger(hps.model_dir)
62
+ logger.info(hps)
63
+ utils.check_git_hash(hps.model_dir)
64
+ writer = SummaryWriter(log_dir=hps.model_dir)
65
+ writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
66
+
67
+ dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank)
68
+ torch.manual_seed(hps.train.seed)
69
+ torch.cuda.set_device(rank)
70
+
71
+ train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
72
+ train_sampler = DistributedBucketSampler(
73
+ train_dataset,
74
+ hps.train.batch_size,
75
+ [32,300,400,500,600,700,800,900,1000],
76
+ num_replicas=n_gpus,
77
+ rank=rank,
78
+ shuffle=True)
79
+ collate_fn = TextAudioSpeakerCollate()
80
+ train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True,
81
+ collate_fn=collate_fn, batch_sampler=train_sampler)
82
+ if rank == 0:
83
+ eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)
84
+ eval_loader = DataLoader(eval_dataset, num_workers=8, shuffle=False,
85
+ batch_size=hps.train.batch_size, pin_memory=True,
86
+ drop_last=False, collate_fn=collate_fn)
87
+
88
+ net_g = SynthesizerTrn(
89
+ len(symbols),
90
+ hps.data.filter_length // 2 + 1,
91
+ hps.train.segment_size // hps.data.hop_length,
92
+ n_speakers=hps.data.n_speakers,
93
+ **hps.model).cuda(rank)
94
+ net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
95
+ optim_g = torch.optim.AdamW(
96
+ net_g.parameters(),
97
+ hps.train.learning_rate,
98
+ betas=hps.train.betas,
99
+ eps=hps.train.eps)
100
+ optim_d = torch.optim.AdamW(
101
+ net_d.parameters(),
102
+ hps.train.learning_rate,
103
+ betas=hps.train.betas,
104
+ eps=hps.train.eps)
105
+ net_g = DDP(net_g, device_ids=[rank])
106
+ net_d = DDP(net_d, device_ids=[rank])
107
+
108
+ try:
109
+ _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g)
110
+ _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d)
111
+ global_step = (epoch_str - 1) * len(train_loader)
112
+ except Exception:  # no checkpoint found; start from scratch
113
+ epoch_str = 1
114
+ global_step = 0
115
+
116
+ scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
117
+ scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str-2)
118
+
119
+ scaler = GradScaler(enabled=hps.train.fp16_run)
120
+
121
+ for epoch in range(epoch_str, hps.train.epochs + 1):
122
+ if rank==0:
123
+ train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
124
+ else:
125
+ train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None)
126
+ scheduler_g.step()
127
+ scheduler_d.step()
128
+
129
+
130
+ def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
131
+ net_g, net_d = nets
132
+ optim_g, optim_d = optims
133
+ scheduler_g, scheduler_d = schedulers
134
+ train_loader, eval_loader = loaders
135
+ if writers is not None:
136
+ writer, writer_eval = writers
137
+
138
+ train_loader.batch_sampler.set_epoch(epoch)
139
+ global global_step
140
+
141
+ net_g.train()
142
+ net_d.train()
143
+ for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers) in enumerate(train_loader):
144
+ x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
145
+ spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
146
+ y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
147
+ speakers = speakers.cuda(rank, non_blocking=True)
148
+
149
+ with autocast(enabled=hps.train.fp16_run):
150
+ y_hat, l_length, attn, ids_slice, x_mask, z_mask,\
151
+ (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(x, x_lengths, spec, spec_lengths, speakers)
152
+
153
+ mel = spec_to_mel_torch(
154
+ spec,
155
+ hps.data.filter_length,
156
+ hps.data.n_mel_channels,
157
+ hps.data.sampling_rate,
158
+ hps.data.mel_fmin,
159
+ hps.data.mel_fmax)
160
+ y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
161
+ y_hat_mel = mel_spectrogram_torch(
162
+ y_hat.squeeze(1),
163
+ hps.data.filter_length,
164
+ hps.data.n_mel_channels,
165
+ hps.data.sampling_rate,
166
+ hps.data.hop_length,
167
+ hps.data.win_length,
168
+ hps.data.mel_fmin,
169
+ hps.data.mel_fmax
170
+ )
171
+
172
+ y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice
173
+
174
+ # Discriminator
175
+ y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
176
+ with autocast(enabled=False):
177
+ loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
178
+ loss_disc_all = loss_disc
179
+ optim_d.zero_grad()
180
+ scaler.scale(loss_disc_all).backward()
181
+ scaler.unscale_(optim_d)
182
+ grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
183
+ scaler.step(optim_d)
184
+
185
+ with autocast(enabled=hps.train.fp16_run):
186
+ # Generator
187
+ y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
188
+ with autocast(enabled=False):
189
+ loss_dur = torch.sum(l_length.float())
190
+ loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
191
+ loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
192
+
193
+ loss_fm = feature_loss(fmap_r, fmap_g)
194
+ loss_gen, losses_gen = generator_loss(y_d_hat_g)
195
+ loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
196
+ optim_g.zero_grad()
197
+ scaler.scale(loss_gen_all).backward()
198
+ scaler.unscale_(optim_g)
199
+ grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
200
+ scaler.step(optim_g)
201
+ scaler.update()
202
+
203
+ if rank==0:
204
+ if global_step % hps.train.log_interval == 0:
205
+ lr = optim_g.param_groups[0]['lr']
206
+ losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl]
207
+ logger.info('Train Epoch: {} [{:.0f}%]'.format(
208
+ epoch,
209
+ 100. * batch_idx / len(train_loader)))
210
+ logger.info([x.item() for x in losses] + [global_step, lr])
211
+
212
+ scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
213
+ scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl})
214
+
215
+ scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
216
+ scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
217
+ scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
218
+ image_dict = {
219
+ "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
220
+ "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
221
+ "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
222
+ "all/attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy())
223
+ }
224
+ utils.summarize(
225
+ writer=writer,
226
+ global_step=global_step,
227
+ images=image_dict,
228
+ scalars=scalar_dict)
229
+
230
+ if global_step % hps.train.eval_interval == 0:
231
+ evaluate(hps, net_g, eval_loader, writer_eval)
232
+ utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
233
+ utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
234
+ global_step += 1
235
+
236
+ if rank == 0:
237
+ logger.info('====> Epoch: {}'.format(epoch))
238
+
239
+
240
+ def evaluate(hps, generator, eval_loader, writer_eval):
241
+ generator.eval()
242
+ with torch.no_grad():
243
+ for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers) in enumerate(eval_loader):
244
+ x, x_lengths = x.cuda(0), x_lengths.cuda(0)
245
+ spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0)
246
+ y, y_lengths = y.cuda(0), y_lengths.cuda(0)
247
+ speakers = speakers.cuda(0)
248
+
249
+ # remove else
250
+ x = x[:1]
251
+ x_lengths = x_lengths[:1]
252
+ spec = spec[:1]
253
+ spec_lengths = spec_lengths[:1]
254
+ y = y[:1]
255
+ y_lengths = y_lengths[:1]
256
+ speakers = speakers[:1]
257
+ break
258
+ y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers, max_len=1000)
259
+ y_hat_lengths = mask.sum([1,2]).long() * hps.data.hop_length
260
+
261
+ mel = spec_to_mel_torch(
262
+ spec,
263
+ hps.data.filter_length,
264
+ hps.data.n_mel_channels,
265
+ hps.data.sampling_rate,
266
+ hps.data.mel_fmin,
267
+ hps.data.mel_fmax)
268
+ y_hat_mel = mel_spectrogram_torch(
269
+ y_hat.squeeze(1).float(),
270
+ hps.data.filter_length,
271
+ hps.data.n_mel_channels,
272
+ hps.data.sampling_rate,
273
+ hps.data.hop_length,
274
+ hps.data.win_length,
275
+ hps.data.mel_fmin,
276
+ hps.data.mel_fmax
277
+ )
278
+ image_dict = {
279
+ "gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy())
280
+ }
281
+ audio_dict = {
282
+ "gen/audio": y_hat[0,:,:y_hat_lengths[0]]
283
+ }
284
+ if global_step == 0:
285
+ image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())})
286
+ audio_dict.update({"gt/audio": y[0,:,:y_lengths[0]]})
287
+
288
+ utils.summarize(
289
+ writer=writer_eval,
290
+ global_step=global_step,
291
+ images=image_dict,
292
+ audios=audio_dict,
293
+ audio_sampling_rate=hps.data.sampling_rate
294
+ )
295
+ generator.train()
296
+
297
+
298
+ if __name__ == "__main__":
299
+ main()
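As a usage sketch, multi-speaker inference mirrors the generator.module.infer call in evaluate above; the speaker-id tensor is the extra input (variable names and the speaker id are illustrative, and .module applies only while net_g is DDP-wrapped):

import torch

sid = torch.LongTensor([3]).cuda(0)  # hypothetical speaker id
with torch.no_grad():
    y_hat, attn, mask, *_ = net_g.module.infer(x[:1], x_lengths[:1], sid, max_len=1000)
audio = y_hat[0, 0].cpu().numpy()  # mono waveform at hps.data.sampling_rate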
transforms.py ADDED
@@ -0,0 +1,193 @@
1
+ import torch
2
+ from torch.nn import functional as F
3
+
4
+ import numpy as np
5
+
6
+
7
+ DEFAULT_MIN_BIN_WIDTH = 1e-3
8
+ DEFAULT_MIN_BIN_HEIGHT = 1e-3
9
+ DEFAULT_MIN_DERIVATIVE = 1e-3
10
+
11
+
12
+ def piecewise_rational_quadratic_transform(inputs,
13
+ unnormalized_widths,
14
+ unnormalized_heights,
15
+ unnormalized_derivatives,
16
+ inverse=False,
17
+ tails=None,
18
+ tail_bound=1.,
19
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
20
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
21
+ min_derivative=DEFAULT_MIN_DERIVATIVE):
22
+
23
+ if tails is None:
24
+ spline_fn = rational_quadratic_spline
25
+ spline_kwargs = {}
26
+ else:
27
+ spline_fn = unconstrained_rational_quadratic_spline
28
+ spline_kwargs = {
29
+ 'tails': tails,
30
+ 'tail_bound': tail_bound
31
+ }
32
+
33
+ outputs, logabsdet = spline_fn(
34
+ inputs=inputs,
35
+ unnormalized_widths=unnormalized_widths,
36
+ unnormalized_heights=unnormalized_heights,
37
+ unnormalized_derivatives=unnormalized_derivatives,
38
+ inverse=inverse,
39
+ min_bin_width=min_bin_width,
40
+ min_bin_height=min_bin_height,
41
+ min_derivative=min_derivative,
42
+ **spline_kwargs
43
+ )
44
+ return outputs, logabsdet
45
+
46
+
47
+ def searchsorted(bin_locations, inputs, eps=1e-6):
48
+ bin_locations[..., -1] += eps
49
+ return torch.sum(
50
+ inputs[..., None] >= bin_locations,
51
+ dim=-1
52
+ ) - 1
53
+
54
+
55
+ def unconstrained_rational_quadratic_spline(inputs,
56
+ unnormalized_widths,
57
+ unnormalized_heights,
58
+ unnormalized_derivatives,
59
+ inverse=False,
60
+ tails='linear',
61
+ tail_bound=1.,
62
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
63
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
64
+ min_derivative=DEFAULT_MIN_DERIVATIVE):
65
+ inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
66
+ outside_interval_mask = ~inside_interval_mask
67
+
68
+ outputs = torch.zeros_like(inputs)
69
+ logabsdet = torch.zeros_like(inputs)
70
+
71
+ if tails == 'linear':
72
+ unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
73
+ constant = np.log(np.exp(1 - min_derivative) - 1)
74
+ unnormalized_derivatives[..., 0] = constant
75
+ unnormalized_derivatives[..., -1] = constant
76
+
77
+ outputs[outside_interval_mask] = inputs[outside_interval_mask]
78
+ logabsdet[outside_interval_mask] = 0
79
+ else:
80
+ raise RuntimeError('{} tails are not implemented.'.format(tails))
81
+
82
+ outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
83
+ inputs=inputs[inside_interval_mask],
84
+ unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
85
+ unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
86
+ unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
87
+ inverse=inverse,
88
+ left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
89
+ min_bin_width=min_bin_width,
90
+ min_bin_height=min_bin_height,
91
+ min_derivative=min_derivative
92
+ )
93
+
94
+ return outputs, logabsdet
95
+
96
+ def rational_quadratic_spline(inputs,
97
+ unnormalized_widths,
98
+ unnormalized_heights,
99
+ unnormalized_derivatives,
100
+ inverse=False,
101
+ left=0., right=1., bottom=0., top=1.,
102
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
103
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
104
+ min_derivative=DEFAULT_MIN_DERIVATIVE):
105
+ if torch.min(inputs) < left or torch.max(inputs) > right:
106
+ raise ValueError('Input to a transform is not within its domain')
107
+
108
+ num_bins = unnormalized_widths.shape[-1]
109
+
110
+ if min_bin_width * num_bins > 1.0:
111
+ raise ValueError('Minimal bin width too large for the number of bins')
112
+ if min_bin_height * num_bins > 1.0:
113
+ raise ValueError('Minimal bin height too large for the number of bins')
114
+
115
+ widths = F.softmax(unnormalized_widths, dim=-1)
116
+ widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
117
+ cumwidths = torch.cumsum(widths, dim=-1)
118
+ cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
119
+ cumwidths = (right - left) * cumwidths + left
120
+ cumwidths[..., 0] = left
121
+ cumwidths[..., -1] = right
122
+ widths = cumwidths[..., 1:] - cumwidths[..., :-1]
123
+
124
+ derivatives = min_derivative + F.softplus(unnormalized_derivatives)
125
+
126
+ heights = F.softmax(unnormalized_heights, dim=-1)
127
+ heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
128
+ cumheights = torch.cumsum(heights, dim=-1)
129
+ cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
130
+ cumheights = (top - bottom) * cumheights + bottom
131
+ cumheights[..., 0] = bottom
132
+ cumheights[..., -1] = top
133
+ heights = cumheights[..., 1:] - cumheights[..., :-1]
134
+
135
+ if inverse:
136
+ bin_idx = searchsorted(cumheights, inputs)[..., None]
137
+ else:
138
+ bin_idx = searchsorted(cumwidths, inputs)[..., None]
139
+
140
+ input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
141
+ input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
142
+
143
+ input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
144
+ delta = heights / widths
145
+ input_delta = delta.gather(-1, bin_idx)[..., 0]
146
+
147
+ input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
148
+ input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
149
+
150
+ input_heights = heights.gather(-1, bin_idx)[..., 0]
151
+
152
+ if inverse:
153
+ a = (((inputs - input_cumheights) * (input_derivatives
154
+ + input_derivatives_plus_one
155
+ - 2 * input_delta)
156
+ + input_heights * (input_delta - input_derivatives)))
157
+ b = (input_heights * input_derivatives
158
+ - (inputs - input_cumheights) * (input_derivatives
159
+ + input_derivatives_plus_one
160
+ - 2 * input_delta))
161
+ c = - input_delta * (inputs - input_cumheights)
162
+
163
+ discriminant = b.pow(2) - 4 * a * c
164
+ assert (discriminant >= 0).all()
165
+
166
+ root = (2 * c) / (-b - torch.sqrt(discriminant))
167
+ outputs = root * input_bin_widths + input_cumwidths
168
+
169
+ theta_one_minus_theta = root * (1 - root)
170
+ denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
171
+ * theta_one_minus_theta)
172
+ derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
173
+ + 2 * input_delta * theta_one_minus_theta
174
+ + input_derivatives * (1 - root).pow(2))
175
+ logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
176
+
177
+ return outputs, -logabsdet
178
+ else:
179
+ theta = (inputs - input_cumwidths) / input_bin_widths
180
+ theta_one_minus_theta = theta * (1 - theta)
181
+
182
+ numerator = input_heights * (input_delta * theta.pow(2)
183
+ + input_derivatives * theta_one_minus_theta)
184
+ denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
185
+ * theta_one_minus_theta)
186
+ outputs = input_cumheights + numerator / denominator
187
+
188
+ derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
189
+ + 2 * input_delta * theta_one_minus_theta
190
+ + input_derivatives * (1 - theta).pow(2))
191
+ logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
192
+
193
+ return outputs, logabsdet
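A self-contained round-trip check of the spline transform above (a sketch; run from the repo root so transforms.py is importable):

import torch
from transforms import piecewise_rational_quadratic_transform

torch.manual_seed(0)
num_bins = 10
x = torch.rand(8) * 2 - 1            # inputs inside the [-1, 1] interval
w = torch.randn(8, num_bins)         # unnormalized widths
h = torch.randn(8, num_bins)         # unnormalized heights
d = torch.randn(8, num_bins - 1)     # 'linear' tails pad this to num_bins + 1 derivatives
y, logdet = piecewise_rational_quadratic_transform(x, w, h, d, tails='linear', tail_bound=1.)
x2, inv_logdet = piecewise_rational_quadratic_transform(y, w, h, d, inverse=True, tails='linear', tail_bound=1.)
print(torch.allclose(x, x2, atol=1e-4))                 # forward/inverse round-trip, up to float32 round-off
print(torch.allclose(logdet, -inv_logdet, atol=1e-4))   # log-determinants are negatives of each other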
utils.py ADDED
@@ -0,0 +1,262 @@
1
+ import os
2
+ import glob
3
+ import sys
4
+ import argparse
5
+ import logging
6
+ import json
7
+ import subprocess
8
+ import numpy as np
9
+ from scipy.io.wavfile import read
10
+ import torch
11
+
12
+ MATPLOTLIB_FLAG = False
13
+
14
+ logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
15
+ logger = logging
16
+
17
+
18
+ def load_checkpoint(checkpoint_path, model, optimizer=None):
19
+ assert os.path.isfile(checkpoint_path)
20
+ checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
21
+ iteration = checkpoint_dict['iteration']
22
+ learning_rate = checkpoint_dict['learning_rate']
23
+ if optimizer is not None:
24
+ optimizer.load_state_dict(checkpoint_dict['optimizer'])
25
+ saved_state_dict = checkpoint_dict['model']
26
+ if hasattr(model, 'module'):
27
+ state_dict = model.module.state_dict()
28
+ else:
29
+ state_dict = model.state_dict()
30
+ new_state_dict = {}
31
+ for k, v in state_dict.items():
32
+ try:
33
+ new_state_dict[k] = saved_state_dict[k]
34
+ except KeyError:  # parameter missing from the checkpoint; keep the freshly initialized value
35
+ logger.info("%s is not in the checkpoint" % k)
36
+ new_state_dict[k] = v
37
+ if hasattr(model, 'module'):
38
+ model.module.load_state_dict(new_state_dict)
39
+ else:
40
+ model.load_state_dict(new_state_dict)
41
+ logger.info("Loaded checkpoint '{}' (iteration {})" .format(
42
+ checkpoint_path, iteration))
43
+ return model, optimizer, learning_rate, iteration
44
+
45
+
46
+ def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
47
+ logger.info("Saving model and optimizer state at iteration {} to {}".format(
48
+ iteration, checkpoint_path))
49
+ if hasattr(model, 'module'):
50
+ state_dict = model.module.state_dict()
51
+ else:
52
+ state_dict = model.state_dict()
53
+ torch.save({'model': state_dict,
54
+ 'iteration': iteration,
55
+ 'optimizer': optimizer.state_dict(),
56
+ 'learning_rate': learning_rate}, checkpoint_path)
57
+
58
+
59
+ def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
60
+ for k, v in scalars.items():
61
+ writer.add_scalar(k, v, global_step)
62
+ for k, v in histograms.items():
63
+ writer.add_histogram(k, v, global_step)
64
+ for k, v in images.items():
65
+ writer.add_image(k, v, global_step, dataformats='HWC')
66
+ for k, v in audios.items():
67
+ writer.add_audio(k, v, global_step, audio_sampling_rate)
68
+
69
+
70
+ def latest_checkpoint_path(dir_path, regex="G_*.pth"):
71
+ f_list = glob.glob(os.path.join(dir_path, regex))
72
+ f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
73
+ x = f_list[-1]
74
+ print(x)
75
+ return x
76
+
77
+
78
+ def plot_spectrogram_to_numpy(spectrogram):
79
+ global MATPLOTLIB_FLAG
80
+ if not MATPLOTLIB_FLAG:
81
+ import matplotlib
82
+ matplotlib.use("Agg")
83
+ MATPLOTLIB_FLAG = True
84
+ mpl_logger = logging.getLogger('matplotlib')
85
+ mpl_logger.setLevel(logging.WARNING)
86
+ import matplotlib.pylab as plt
87
+ import numpy as np
88
+
89
+ fig, ax = plt.subplots(figsize=(10,2))
90
+ im = ax.imshow(spectrogram, aspect="auto", origin="lower",
91
+ interpolation='none')
92
+ plt.colorbar(im, ax=ax)
93
+ plt.xlabel("Frames")
94
+ plt.ylabel("Channels")
95
+ plt.tight_layout()
96
+
97
+ fig.canvas.draw()
98
+ data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)  # np.fromstring is deprecated for binary data
99
+ data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
100
+ plt.close()
101
+ return data
102
+
103
+
104
+ def plot_alignment_to_numpy(alignment, info=None):
105
+ global MATPLOTLIB_FLAG
106
+ if not MATPLOTLIB_FLAG:
107
+ import matplotlib
108
+ matplotlib.use("Agg")
109
+ MATPLOTLIB_FLAG = True
110
+ mpl_logger = logging.getLogger('matplotlib')
111
+ mpl_logger.setLevel(logging.WARNING)
112
+ import matplotlib.pylab as plt
113
+ import numpy as np
114
+
115
+ fig, ax = plt.subplots(figsize=(6, 4))
116
+ im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
117
+ interpolation='none')
118
+ fig.colorbar(im, ax=ax)
119
+ xlabel = 'Decoder timestep'
120
+ if info is not None:
121
+ xlabel += '\n\n' + info
122
+ plt.xlabel(xlabel)
123
+ plt.ylabel('Encoder timestep')
124
+ plt.tight_layout()
125
+
126
+ fig.canvas.draw()
127
+ data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)  # np.fromstring is deprecated for binary data
128
+ data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
129
+ plt.close()
130
+ return data
131
+
132
+
133
+ def load_wav_to_torch(full_path):
134
+ sampling_rate, data = read(full_path)
135
+ return torch.FloatTensor(data.astype(np.float32)), sampling_rate
136
+
137
+
138
+ def load_filepaths_and_text(filename, split="|"):
139
+ with open(filename, encoding='utf-8') as f:
140
+ filepaths_and_text = [line.strip().split(split) for line in f]
141
+ return filepaths_and_text
142
+
143
+
144
+ def get_hparams(init=True):
145
+ parser = argparse.ArgumentParser()
146
+ parser.add_argument('-c', '--config', type=str, default="/tts_koni/configs/japanese_base.json",
147
+ help='JSON file for configuration')
148
+
149
+ #parser.add_argument('-m', '--model', type=str, required=True,
150
+ #help='Model name')
151
+
152
+ parser.add_argument('-m', '--model', type=str, default="japanese_base",
153
+ help='Model name')
154
+
155
+ args = parser.parse_args()
156
+ model_dir = os.path.join("/tts_koni/MyDrive", args.model)
157
+
158
+ if not os.path.exists(model_dir):
159
+ os.makedirs(model_dir)
160
+
161
+ config_path = args.config
162
+ config_save_path = os.path.join(model_dir, "config.json")
163
+ if init:
164
+ with open(config_path, "r") as f:
165
+ data = f.read()
166
+ with open(config_save_path, "w") as f:
167
+ f.write(data)
168
+ else:
169
+ with open(config_save_path, "r") as f:
170
+ data = f.read()
171
+ config = json.loads(data)
172
+
173
+ hparams = HParams(**config)
174
+ hparams.model_dir = model_dir
175
+ return hparams
176
+
177
+
178
+ def get_hparams_from_dir(model_dir):
179
+ config_save_path = os.path.join(model_dir, "config.json")
180
+ with open(config_save_path, "r") as f:
181
+ data = f.read()
182
+ config = json.loads(data)
183
+
184
+ hparams = HParams(**config)
185
+ hparams.model_dir = model_dir
186
+ return hparams
187
+
188
+
189
+ def get_hparams_from_file(config_path):
190
+ with open(config_path, "r") as f:
191
+ data = f.read()
192
+ config = json.loads(data)
193
+
194
+ hparams = HParams(**config)
195
+ return hparams
196
+
197
+
198
+ def check_git_hash(model_dir):
199
+ source_dir = os.path.dirname(os.path.realpath(__file__))
200
+ if not os.path.exists(os.path.join(source_dir, ".git")):
201
+ logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
202
+ source_dir
203
+ ))
204
+ return
205
+
206
+ cur_hash = subprocess.getoutput("git rev-parse HEAD")
207
+
208
+ path = os.path.join(model_dir, "githash")
209
+ if os.path.exists(path):
210
+ saved_hash = open(path).read()
211
+ if saved_hash != cur_hash:
212
+ logger.warn("git hash values are different. {}(saved) != {}(current)".format(
213
+ saved_hash[:8], cur_hash[:8]))
214
+ else:
215
+ open(path, "w").write(cur_hash)
216
+
217
+
218
+ def get_logger(model_dir, filename="train.log"):
219
+ global logger
220
+ logger = logging.getLogger(os.path.basename(model_dir))
221
+ logger.setLevel(logging.DEBUG)
222
+
223
+ formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
224
+ if not os.path.exists(model_dir):
225
+ os.makedirs(model_dir)
226
+ h = logging.FileHandler(os.path.join(model_dir, filename))
227
+ h.setLevel(logging.DEBUG)
228
+ h.setFormatter(formatter)
229
+ logger.addHandler(h)
230
+ return logger
231
+
232
+
233
+ class HParams():
234
+ def __init__(self, **kwargs):
235
+ for k, v in kwargs.items():
236
+ if isinstance(v, dict):
237
+ v = HParams(**v)
238
+ self[k] = v
239
+
240
+ def keys(self):
241
+ return self.__dict__.keys()
242
+
243
+ def items(self):
244
+ return self.__dict__.items()
245
+
246
+ def values(self):
247
+ return self.__dict__.values()
248
+
249
+ def __len__(self):
250
+ return len(self.__dict__)
251
+
252
+ def __getitem__(self, key):
253
+ return getattr(self, key)
254
+
255
+ def __setitem__(self, key, value):
256
+ return setattr(self, key, value)
257
+
258
+ def __contains__(self, key):
259
+ return key in self.__dict__
260
+
261
+ def __repr__(self):
262
+ return self.__dict__.__repr__()
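Finally, a short sketch of how these utilities are consumed elsewhere in this commit (the config path is illustrative):

import utils

hps = utils.get_hparams_from_file('configs/japanese_base.json')  # nested JSON becomes an HParams tree
print(hps.data.sampling_rate, hps.train.batch_size)  # attribute-style access to nested values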