AAOBA committed on
Commit: 9bd9742
Parent: 997b8ff

first commit

This view is limited to 50 files because the commit contains too many changes.

Files changed (50):
  1. app.py +250 -0
  2. attentions.py +464 -0
  3. bert/bert-base-japanese-v3/README.md +53 -0
  4. bert/bert-base-japanese-v3/config.json +19 -0
  5. bert/bert-base-japanese-v3/tokenizer_config.json +10 -0
  6. bert/bert-base-japanese-v3/vocab.txt +0 -0
  7. bert/chinese-roberta-wwm-ext-large/.gitattributes +9 -0
  8. bert/chinese-roberta-wwm-ext-large/.gitignore +1 -0
  9. bert/chinese-roberta-wwm-ext-large/README.md +57 -0
  10. bert/chinese-roberta-wwm-ext-large/added_tokens.json +1 -0
  11. bert/chinese-roberta-wwm-ext-large/config.json +28 -0
  12. bert/chinese-roberta-wwm-ext-large/special_tokens_map.json +1 -0
  13. bert/chinese-roberta-wwm-ext-large/tokenizer.json +0 -0
  14. bert/chinese-roberta-wwm-ext-large/tokenizer_config.json +1 -0
  15. bert/chinese-roberta-wwm-ext-large/vocab.txt +0 -0
  16. bert_gen.py +59 -0
  17. commons.py +160 -0
  18. data_utils.py +406 -0
  19. info.md +27 -0
  20. losses.py +58 -0
  21. mel_processing.py +139 -0
  22. models.py +986 -0
  23. models/DUR_90000.pth +3 -0
  24. models/D_90000.pth +3 -0
  25. models/G_90000.pth +3 -0
  26. models/config.json +198 -0
  27. modules.py +597 -0
  28. monotonic_align/__init__.py +16 -0
  29. monotonic_align/__pycache__/__init__.cpython-39.pyc +0 -0
  30. monotonic_align/__pycache__/core.cpython-39.pyc +0 -0
  31. monotonic_align/core.py +46 -0
  32. preprocess_text.py +120 -0
  33. requirements.txt +23 -0
  34. resample.py +48 -0
  35. server.py +170 -0
  36. text/__init__.py +28 -0
  37. text/__pycache__/__init__.cpython-39.pyc +0 -0
  38. text/__pycache__/chinese.cpython-39.pyc +0 -0
  39. text/__pycache__/chinese_bert.cpython-39.pyc +0 -0
  40. text/__pycache__/cleaner.cpython-39.pyc +0 -0
  41. text/__pycache__/english_bert_mock.cpython-39.pyc +0 -0
  42. text/__pycache__/japanese.cpython-39.pyc +0 -0
  43. text/__pycache__/japanese_bert.cpython-39.pyc +0 -0
  44. text/__pycache__/symbols.cpython-39.pyc +0 -0
  45. text/__pycache__/tone_sandhi.cpython-39.pyc +0 -0
  46. text/chinese.py +198 -0
  47. text/chinese_bert.py +100 -0
  48. text/cleaner.py +28 -0
  49. text/cmudict.rep +0 -0
  50. text/cmudict_cache.pickle +3 -0
app.py ADDED
@@ -0,0 +1,250 @@
+ # flake8: noqa: E402
+ 
+ import sys, os
+ 
+ import logging
+ 
+ logging.getLogger("numba").setLevel(logging.WARNING)
+ logging.getLogger("markdown_it").setLevel(logging.WARNING)
+ logging.getLogger("urllib3").setLevel(logging.WARNING)
+ logging.getLogger("matplotlib").setLevel(logging.WARNING)
+ 
+ logging.basicConfig(
+     level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s"
+ )
+ 
+ logger = logging.getLogger(__name__)
+ import torch
+ import argparse
+ import commons
+ import utils
+ from models import SynthesizerTrn
+ from text.symbols import symbols
+ from text import cleaned_text_to_sequence, get_bert
+ from text.cleaner import clean_text
+ import gradio as gr
+ import webbrowser
+ import numpy as np
+ 
+ 
+ net_g = None
+ 
+ if sys.platform == "darwin" and torch.backends.mps.is_available():
+     device = "mps"
+     os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
+ else:
+     device = "cpu"
+ 
+ 
+ def get_text(text, language_str, hps):
+     norm_text, phone, tone, word2ph = clean_text(text, language_str)
+     phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
+ 
+     if hps.data.add_blank:
+         phone = commons.intersperse(phone, 0)
+         tone = commons.intersperse(tone, 0)
+         language = commons.intersperse(language, 0)
+         for i in range(len(word2ph)):
+             word2ph[i] = word2ph[i] * 2
+         word2ph[0] += 1
+     bert = get_bert(norm_text, word2ph, language_str, device)
+     del word2ph
+     assert bert.shape[-1] == len(phone), phone
+ 
+     if language_str == "ZH":
+         ja_bert = torch.zeros(768, len(phone))
+     elif language_str == "JP":
+         ja_bert = bert
+         bert = torch.zeros(1024, len(phone))
+     else:
+         bert = torch.zeros(1024, len(phone))
+         ja_bert = torch.zeros(768, len(phone))
+ 
+     assert bert.shape[-1] == len(
+         phone
+     ), f"Bert seq len {bert.shape[-1]} != {len(phone)}"
+ 
+     phone = torch.LongTensor(phone)
+     tone = torch.LongTensor(tone)
+     language = torch.LongTensor(language)
+     return bert, ja_bert, phone, tone, language
+ 
+ 
+ def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid, language):
+     global net_g
+     bert, ja_bert, phones, tones, lang_ids = get_text(text, language, hps)
+     with torch.no_grad():
+         x_tst = phones.to(device).unsqueeze(0)
+         tones = tones.to(device).unsqueeze(0)
+         lang_ids = lang_ids.to(device).unsqueeze(0)
+         bert = bert.to(device).unsqueeze(0)
+         ja_bert = ja_bert.to(device).unsqueeze(0)
+         x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
+         del phones
+         speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
+         audio = (
+             net_g.infer(
+                 x_tst,
+                 x_tst_lengths,
+                 speakers,
+                 tones,
+                 lang_ids,
+                 bert,
+                 ja_bert,
+                 sdp_ratio=sdp_ratio,
+                 noise_scale=noise_scale,
+                 noise_scale_w=noise_scale_w,
+                 length_scale=length_scale,
+             )[0][0, 0]
+             .data.cpu()
+             .float()
+             .numpy()
+         )
+         del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers
+         torch.cuda.empty_cache()
+         return audio
+ 
+ 
+ def tts_fn(
+     text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale, language
+ ):
+     print(f"text: {text}, speaker: {speaker}")
+     slices = text.split("\n")
+     audio_list = []
+     with torch.no_grad():
+         for slice in slices:
+             audio = infer(
+                 slice,
+                 sdp_ratio=sdp_ratio,
+                 noise_scale=noise_scale,
+                 noise_scale_w=noise_scale_w,
+                 length_scale=length_scale,
+                 sid=speaker,
+                 language=language,
+             )
+             audio_list.append(audio)
+             silence = np.zeros(hps.data.sampling_rate)  # one second of silence
+             audio_list.append(silence)  # append the silence after each row
+     audio_concat = np.concatenate(audio_list)
+     return "Success", (hps.data.sampling_rate, audio_concat)
+ 
+ 
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "-m", "--model", default="./models/G_90000.pth", help="path of your model"
+     )
+     parser.add_argument(
+         "-c",
+         "--config",
+         default="./models/config.json",
+         help="path of your config file",
+     )
+     parser.add_argument(
+         "--share", default=False, help="make link public", action="store_true"
+     )
+     parser.add_argument(
+         "-d", "--debug", action="store_true", help="enable DEBUG-level log"
+     )
+     parser.add_argument(
+         "--info_md", default="./info.md", help="info markdown file"
+     )
+ 
+     args = parser.parse_args()
+     if args.debug:
+         logger.info("Enable DEBUG-LEVEL log")
+         logging.basicConfig(level=logging.DEBUG)
+     hps = utils.get_hparams_from_file(args.config)
+ 
+     device = (
+         "cuda:0"
+         if torch.cuda.is_available()
+         else (
+             "mps"
+             if sys.platform == "darwin" and torch.backends.mps.is_available()
+             else "cpu"
+         )
+     )
+     net_g = SynthesizerTrn(
+         len(symbols),
+         hps.data.filter_length // 2 + 1,
+         hps.train.segment_size // hps.data.hop_length,
+         n_speakers=hps.data.n_speakers,
+         **hps.model,
+     ).to(device)
+     _ = net_g.eval()
+ 
+     _ = utils.load_checkpoint(args.model, net_g, None, skip_optimizer=True)
+ 
+     speaker_ids = hps.data.spk2id
+     speakers = list(speaker_ids.keys())
+     languages = ["JP"]
+     with gr.Blocks(title="Umamusume-DeBERTa-VITS2") as app:
+         with gr.Row():
+             with gr.Column():
+                 text = gr.TextArea(
+                     label="Text",
+                     placeholder="Input Text Here",
+                     value="張り切って行こう!",
+                 )
+                 speaker = gr.Dropdown(
+                     choices=speakers, value=speakers[0], label="Speaker"
+                 )
+                 sdp_ratio = gr.Slider(
+                     minimum=0, maximum=1, value=0.3, step=0.05, label="SDP Ratio"
+                 )
+                 noise_scale = gr.Slider(
+                     minimum=0.1, maximum=2, value=0.6, step=0.05, label="Noise Scale"
+                 )
+                 noise_scale_w = gr.Slider(
+                     minimum=0.1, maximum=2, value=0.8, step=0.05, label="Noise Scale W"
+                 )
+                 length_scale = gr.Slider(
+                     minimum=0.1, maximum=2, value=1, step=0.05, label="Length Scale"
+                 )
+                 language = gr.Dropdown(
+                     choices=languages, value=languages[0], label="Language"
+                 )
+                 btn = gr.Button("Generate!", variant="primary")
+             with gr.Column():
+                 text_output = gr.Textbox(label="Message")
+                 audio_output = gr.Audio(label="Output Audio")
+                 samples = gr.Textbox(label="WEIRD Samples Given By GPT-4")
+                 samples.value = "⚠ 强烈不建议将所有内容扔进输入,这会导致相当久的推理时间 ⚠\n" \
+                     "⚠ すべての内容をお入りになることがお勧めしませんで、生成時間が非常に長くなるでしょう ⚠\n" \
+                     "⚠ Throwing Everything into text input leads to unexpected long inference time ⚠\n" \
+                     "おはよう、今日も一緒に頑張りましょうね!\n" \
+                     + "ねえねえ、あなたの好きなお料理作ってあげるよ!\n" \
+                     + "きゃー!びっくりさせないでよ~!\n" \
+                     + "あのね、新しいドレス買ったの。どう思う?\n" \
+                     + "あっ、遅くなっちゃった!ごめんなさい!\n" \
+                     + "今日のデート、すごく楽しかったよ!また行きましょうね!\n" \
+                     + "私のこと、好き?\n" \
+                     + "あなたといると、時間があっという間に過ぎちゃうね。\n" \
+                     + "あたし、あなたが大好きだよ。\n" \
+                     + "ねえ、もっと話して!あなたの話、大好きなの!\n" \
+                     + "あっ、それ可愛いね!私に似合うかな?\n" \
+                     + "あなたのこと、ずっと考えてたんだよ。\n" \
+                     + "今日はどんな一日だった?私にも話して!\n" \
+                     + "あなたの笑顔、大好き!もっと見せて!\n" \
+                     + "おやすみ、いい夢見てね!"
+                 with open(args.info_md, "r", encoding="UTF-8") as file:
+                     data = file.read()
+                 md_info = gr.Markdown(data)
+ 
+         btn.click(
+             tts_fn,
+             inputs=[
+                 text,
+                 speaker,
+                 sdp_ratio,
+                 noise_scale,
+                 noise_scale_w,
+                 length_scale,
+                 language,
+             ],
+             outputs=[text_output, audio_output],
+         )
+     webbrowser.open("http://127.0.0.1:26860")
+     app.launch()
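For scripted synthesis without the web UI, here is a minimal sketch that could be appended to the `__main__` block above after `load_checkpoint` (it reuses the module-level `net_g`, `hps`, and `device` that `infer` reads; `scipy` is an assumed extra dependency for writing the wav):

```python
# Hypothetical UI-free usage of infer(); assumes it runs inside app.py's
# __main__ after the checkpoint is loaded, and that scipy is installed.
from scipy.io import wavfile

speaker = speakers[0]  # first speaker from hps.data.spk2id
audio = infer(
    "張り切って行こう!",
    sdp_ratio=0.3,
    noise_scale=0.6,
    noise_scale_w=0.8,
    length_scale=1.0,
    sid=speaker,
    language="JP",
)
wavfile.write("sample.wav", hps.data.sampling_rate, audio)  # float32 PCM
```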
attentions.py ADDED
@@ -0,0 +1,464 @@
+ import math
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+ 
+ import commons
+ import logging
+ 
+ logger = logging.getLogger(__name__)
+ 
+ 
+ class LayerNorm(nn.Module):
+     def __init__(self, channels, eps=1e-5):
+         super().__init__()
+         self.channels = channels
+         self.eps = eps
+ 
+         self.gamma = nn.Parameter(torch.ones(channels))
+         self.beta = nn.Parameter(torch.zeros(channels))
+ 
+     def forward(self, x):
+         x = x.transpose(1, -1)
+         x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+         return x.transpose(1, -1)
+ 
+ 
+ @torch.jit.script
+ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+     n_channels_int = n_channels[0]
+     in_act = input_a + input_b
+     t_act = torch.tanh(in_act[:, :n_channels_int, :])
+     s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+     acts = t_act * s_act
+     return acts
+ 
+ 
+ class Encoder(nn.Module):
+     def __init__(
+         self,
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size=1,
+         p_dropout=0.0,
+         window_size=4,
+         isflow=True,
+         **kwargs
+     ):
+         super().__init__()
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.window_size = window_size
+         # if isflow:
+         #     cond_layer = torch.nn.Conv1d(256, 2 * hidden_channels * n_layers, 1)
+         #     self.cond_pre = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, 1)
+         #     self.cond_layer = weight_norm(cond_layer, name='weight')
+         #     self.gin_channels = 256
+         self.cond_layer_idx = self.n_layers
+         if "gin_channels" in kwargs:
+             self.gin_channels = kwargs["gin_channels"]
+             if self.gin_channels != 0:
+                 self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels)
+                 # vits2 says 3rd block, so idx is 2 by default
+                 self.cond_layer_idx = (
+                     kwargs["cond_layer_idx"] if "cond_layer_idx" in kwargs else 2
+                 )
+                 # logging.debug(self.gin_channels, self.cond_layer_idx)
+                 assert (
+                     self.cond_layer_idx < self.n_layers
+                 ), "cond_layer_idx should be less than n_layers"
+         self.drop = nn.Dropout(p_dropout)
+         self.attn_layers = nn.ModuleList()
+         self.norm_layers_1 = nn.ModuleList()
+         self.ffn_layers = nn.ModuleList()
+         self.norm_layers_2 = nn.ModuleList()
+         for i in range(self.n_layers):
+             self.attn_layers.append(
+                 MultiHeadAttention(
+                     hidden_channels,
+                     hidden_channels,
+                     n_heads,
+                     p_dropout=p_dropout,
+                     window_size=window_size,
+                 )
+             )
+             self.norm_layers_1.append(LayerNorm(hidden_channels))
+             self.ffn_layers.append(
+                 FFN(
+                     hidden_channels,
+                     hidden_channels,
+                     filter_channels,
+                     kernel_size,
+                     p_dropout=p_dropout,
+                 )
+             )
+             self.norm_layers_2.append(LayerNorm(hidden_channels))
+ 
+     def forward(self, x, x_mask, g=None):
+         attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+         x = x * x_mask
+         for i in range(self.n_layers):
+             if i == self.cond_layer_idx and g is not None:
+                 g = self.spk_emb_linear(g.transpose(1, 2))
+                 g = g.transpose(1, 2)
+                 x = x + g
+                 x = x * x_mask
+             y = self.attn_layers[i](x, x, attn_mask)
+             y = self.drop(y)
+             x = self.norm_layers_1[i](x + y)
+ 
+             y = self.ffn_layers[i](x, x_mask)
+             y = self.drop(y)
+             x = self.norm_layers_2[i](x + y)
+         x = x * x_mask
+         return x
+ 
+ 
+ class Decoder(nn.Module):
+     def __init__(
+         self,
+         hidden_channels,
+         filter_channels,
+         n_heads,
+         n_layers,
+         kernel_size=1,
+         p_dropout=0.0,
+         proximal_bias=False,
+         proximal_init=True,
+         **kwargs
+     ):
+         super().__init__()
+         self.hidden_channels = hidden_channels
+         self.filter_channels = filter_channels
+         self.n_heads = n_heads
+         self.n_layers = n_layers
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.proximal_bias = proximal_bias
+         self.proximal_init = proximal_init
+ 
+         self.drop = nn.Dropout(p_dropout)
+         self.self_attn_layers = nn.ModuleList()
+         self.norm_layers_0 = nn.ModuleList()
+         self.encdec_attn_layers = nn.ModuleList()
+         self.norm_layers_1 = nn.ModuleList()
+         self.ffn_layers = nn.ModuleList()
+         self.norm_layers_2 = nn.ModuleList()
+         for i in range(self.n_layers):
+             self.self_attn_layers.append(
+                 MultiHeadAttention(
+                     hidden_channels,
+                     hidden_channels,
+                     n_heads,
+                     p_dropout=p_dropout,
+                     proximal_bias=proximal_bias,
+                     proximal_init=proximal_init,
+                 )
+             )
+             self.norm_layers_0.append(LayerNorm(hidden_channels))
+             self.encdec_attn_layers.append(
+                 MultiHeadAttention(
+                     hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
+                 )
+             )
+             self.norm_layers_1.append(LayerNorm(hidden_channels))
+             self.ffn_layers.append(
+                 FFN(
+                     hidden_channels,
+                     hidden_channels,
+                     filter_channels,
+                     kernel_size,
+                     p_dropout=p_dropout,
+                     causal=True,
+                 )
+             )
+             self.norm_layers_2.append(LayerNorm(hidden_channels))
+ 
+     def forward(self, x, x_mask, h, h_mask):
+         """
+         x: decoder input
+         h: encoder output
+         """
+         self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
+             device=x.device, dtype=x.dtype
+         )
+         encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+         x = x * x_mask
+         for i in range(self.n_layers):
+             y = self.self_attn_layers[i](x, x, self_attn_mask)
+             y = self.drop(y)
+             x = self.norm_layers_0[i](x + y)
+ 
+             y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
+             y = self.drop(y)
+             x = self.norm_layers_1[i](x + y)
+ 
+             y = self.ffn_layers[i](x, x_mask)
+             y = self.drop(y)
+             x = self.norm_layers_2[i](x + y)
+         x = x * x_mask
+         return x
+ 
+ 
+ class MultiHeadAttention(nn.Module):
+     def __init__(
+         self,
+         channels,
+         out_channels,
+         n_heads,
+         p_dropout=0.0,
+         window_size=None,
+         heads_share=True,
+         block_length=None,
+         proximal_bias=False,
+         proximal_init=False,
+     ):
+         super().__init__()
+         assert channels % n_heads == 0
+ 
+         self.channels = channels
+         self.out_channels = out_channels
+         self.n_heads = n_heads
+         self.p_dropout = p_dropout
+         self.window_size = window_size
+         self.heads_share = heads_share
+         self.block_length = block_length
+         self.proximal_bias = proximal_bias
+         self.proximal_init = proximal_init
+         self.attn = None
+ 
+         self.k_channels = channels // n_heads
+         self.conv_q = nn.Conv1d(channels, channels, 1)
+         self.conv_k = nn.Conv1d(channels, channels, 1)
+         self.conv_v = nn.Conv1d(channels, channels, 1)
+         self.conv_o = nn.Conv1d(channels, out_channels, 1)
+         self.drop = nn.Dropout(p_dropout)
+ 
+         if window_size is not None:
+             n_heads_rel = 1 if heads_share else n_heads
+             rel_stddev = self.k_channels**-0.5
+             self.emb_rel_k = nn.Parameter(
+                 torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
+                 * rel_stddev
+             )
+             self.emb_rel_v = nn.Parameter(
+                 torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
+                 * rel_stddev
+             )
+ 
+         nn.init.xavier_uniform_(self.conv_q.weight)
+         nn.init.xavier_uniform_(self.conv_k.weight)
+         nn.init.xavier_uniform_(self.conv_v.weight)
+         if proximal_init:
+             with torch.no_grad():
+                 self.conv_k.weight.copy_(self.conv_q.weight)
+                 self.conv_k.bias.copy_(self.conv_q.bias)
+ 
+     def forward(self, x, c, attn_mask=None):
+         q = self.conv_q(x)
+         k = self.conv_k(c)
+         v = self.conv_v(c)
+ 
+         x, self.attn = self.attention(q, k, v, mask=attn_mask)
+ 
+         x = self.conv_o(x)
+         return x
+ 
+     def attention(self, query, key, value, mask=None):
+         # reshape [b, d, t] -> [b, n_h, t, d_k]
+         b, d, t_s, t_t = (*key.size(), query.size(2))
+         query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
+         key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+         value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+ 
+         scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
+         if self.window_size is not None:
+             assert (
+                 t_s == t_t
+             ), "Relative attention is only available for self-attention."
+             key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
+             rel_logits = self._matmul_with_relative_keys(
+                 query / math.sqrt(self.k_channels), key_relative_embeddings
+             )
+             scores_local = self._relative_position_to_absolute_position(rel_logits)
+             scores = scores + scores_local
+         if self.proximal_bias:
+             assert t_s == t_t, "Proximal bias is only available for self-attention."
+             scores = scores + self._attention_bias_proximal(t_s).to(
+                 device=scores.device, dtype=scores.dtype
+             )
+         if mask is not None:
+             scores = scores.masked_fill(mask == 0, -1e4)
+             if self.block_length is not None:
+                 assert (
+                     t_s == t_t
+                 ), "Local attention is only available for self-attention."
+                 block_mask = (
+                     torch.ones_like(scores)
+                     .triu(-self.block_length)
+                     .tril(self.block_length)
+                 )
+                 scores = scores.masked_fill(block_mask == 0, -1e4)
+         p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
+         p_attn = self.drop(p_attn)
+         output = torch.matmul(p_attn, value)
+         if self.window_size is not None:
+             relative_weights = self._absolute_position_to_relative_position(p_attn)
+             value_relative_embeddings = self._get_relative_embeddings(
+                 self.emb_rel_v, t_s
+             )
+             output = output + self._matmul_with_relative_values(
+                 relative_weights, value_relative_embeddings
+             )
+         output = (
+             output.transpose(2, 3).contiguous().view(b, d, t_t)
+         )  # [b, n_h, t_t, d_k] -> [b, d, t_t]
+         return output, p_attn
+ 
+     def _matmul_with_relative_values(self, x, y):
+         """
+         x: [b, h, l, m]
+         y: [h or 1, m, d]
+         ret: [b, h, l, d]
+         """
+         ret = torch.matmul(x, y.unsqueeze(0))
+         return ret
+ 
+     def _matmul_with_relative_keys(self, x, y):
+         """
+         x: [b, h, l, d]
+         y: [h or 1, m, d]
+         ret: [b, h, l, m]
+         """
+         ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
+         return ret
+ 
+     def _get_relative_embeddings(self, relative_embeddings, length):
+         # Pad first before slice to avoid using cond ops.
+         pad_length = max(length - (self.window_size + 1), 0)
+         slice_start_position = max((self.window_size + 1) - length, 0)
+         slice_end_position = slice_start_position + 2 * length - 1
+         if pad_length > 0:
+             padded_relative_embeddings = F.pad(
+                 relative_embeddings,
+                 commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
+             )
+         else:
+             padded_relative_embeddings = relative_embeddings
+         used_relative_embeddings = padded_relative_embeddings[
+             :, slice_start_position:slice_end_position
+         ]
+         return used_relative_embeddings
+ 
+     def _relative_position_to_absolute_position(self, x):
+         """
+         x: [b, h, l, 2*l-1]
+         ret: [b, h, l, l]
+         """
+         batch, heads, length, _ = x.size()
+         # Concat columns of pad to shift from relative to absolute indexing.
+         x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
+ 
+         # Concat extra elements so to add up to shape (len+1, 2*len-1).
+         x_flat = x.view([batch, heads, length * 2 * length])
+         x_flat = F.pad(
+             x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
+         )
+ 
+         # Reshape and slice out the padded elements.
+         x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
+             :, :, :length, length - 1 :
+         ]
+         return x_final
+ 
+     def _absolute_position_to_relative_position(self, x):
+         """
+         x: [b, h, l, l]
+         ret: [b, h, l, 2*l-1]
+         """
+         batch, heads, length, _ = x.size()
+         # pad along column
+         x = F.pad(
+             x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
+         )
+         x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
+         # add 0's in the beginning that will skew the elements after reshape
+         x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
+         x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
+         return x_final
+ 
+     def _attention_bias_proximal(self, length):
+         """Bias for self-attention to encourage attention to close positions.
+         Args:
+             length: an integer scalar.
+         Returns:
+             a Tensor with shape [1, 1, length, length]
+         """
+         r = torch.arange(length, dtype=torch.float32)
+         diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
+         return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
+ 
+ 
+ class FFN(nn.Module):
+     def __init__(
+         self,
+         in_channels,
+         out_channels,
+         filter_channels,
+         kernel_size,
+         p_dropout=0.0,
+         activation=None,
+         causal=False,
+     ):
+         super().__init__()
+         self.in_channels = in_channels
+         self.out_channels = out_channels
+         self.filter_channels = filter_channels
+         self.kernel_size = kernel_size
+         self.p_dropout = p_dropout
+         self.activation = activation
+         self.causal = causal
+ 
+         if causal:
+             self.padding = self._causal_padding
+         else:
+             self.padding = self._same_padding
+ 
+         self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
+         self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
+         self.drop = nn.Dropout(p_dropout)
+ 
+     def forward(self, x, x_mask):
+         x = self.conv_1(self.padding(x * x_mask))
+         if self.activation == "gelu":
+             x = x * torch.sigmoid(1.702 * x)
+         else:
+             x = torch.relu(x)
+         x = self.drop(x)
+         x = self.conv_2(self.padding(x * x_mask))
+         return x * x_mask
+ 
+     def _causal_padding(self, x):
+         if self.kernel_size == 1:
+             return x
+         pad_l = self.kernel_size - 1
+         pad_r = 0
+         padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+         x = F.pad(x, commons.convert_pad_shape(padding))
+         return x
+ 
+     def _same_padding(self, x):
+         if self.kernel_size == 1:
+             return x
+         pad_l = (self.kernel_size - 1) // 2
+         pad_r = self.kernel_size // 2
+         padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+         x = F.pad(x, commons.convert_pad_shape(padding))
+         return x
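As a quick shape check for the relative-position `Encoder` above, a minimal sketch with arbitrary illustrative sizes (not the project's actual configuration):

```python
# Smoke test: dimensions are illustrative, not the model's real hyperparameters.
import torch
import commons
from attentions import Encoder

enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2, n_layers=6)
x = torch.randn(2, 192, 50)                 # [batch, channels, time]
lengths = torch.tensor([50, 30])            # valid frames per batch item
x_mask = commons.sequence_mask(lengths, 50).unsqueeze(1).float()  # [b, 1, t]
y = enc(x, x_mask)
print(y.shape)  # torch.Size([2, 192, 50]); padded frames remain zeroed
```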
bert/bert-base-japanese-v3/README.md ADDED
@@ -0,0 +1,53 @@
+ ---
+ license: apache-2.0
+ datasets:
+ - cc100
+ - wikipedia
+ language:
+ - ja
+ widget:
+ - text: 東北大学で[MASK]の研究をしています。
+ ---
+ 
+ # BERT base Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102)
+ 
+ This is a [BERT](https://github.com/google-research/bert) model pretrained on texts in the Japanese language.
+ 
+ This version of the model processes input texts with word-level tokenization based on the Unidic 2.1.2 dictionary (available in the [unidic-lite](https://pypi.org/project/unidic-lite/) package), followed by WordPiece subword tokenization.
+ Additionally, the model is trained with whole word masking enabled for the masked language modeling (MLM) objective.
+ 
+ The code for pretraining is available at [cl-tohoku/bert-japanese](https://github.com/cl-tohoku/bert-japanese/).
+ 
+ ## Model architecture
+ 
+ The model architecture is the same as the original BERT base model: 12 layers, 768 dimensions of hidden states, and 12 attention heads.
+ 
+ ## Training Data
+ 
+ The model is trained on the Japanese portion of the [CC-100 dataset](https://data.statmt.org/cc-100/) and the Japanese version of Wikipedia.
+ For Wikipedia, we generated a text corpus from the [Wikipedia Cirrussearch dump file](https://dumps.wikimedia.org/other/cirrussearch/) as of January 2, 2023.
+ The corpus files generated from CC-100 and Wikipedia are 74.3GB and 4.9GB in size and consist of approximately 392M and 34M sentences, respectively.
+ 
+ For the purpose of splitting texts into sentences, we used [fugashi](https://github.com/polm/fugashi) with the [mecab-ipadic-NEologd](https://github.com/neologd/mecab-ipadic-neologd) dictionary (v0.0.7).
+ 
+ ## Tokenization
+ 
+ The texts are first tokenized by MeCab with the Unidic 2.1.2 dictionary and then split into subwords by the WordPiece algorithm.
+ The vocabulary size is 32768.
+ 
+ We used the [fugashi](https://github.com/polm/fugashi) and [unidic-lite](https://github.com/polm/unidic-lite) packages for the tokenization.
+ 
+ ## Training
+ 
+ We trained the model first on the CC-100 corpus for 1M steps and then on the Wikipedia corpus for another 1M steps.
+ For training of the MLM (masked language modeling) objective, we introduced whole word masking, in which all of the subword tokens corresponding to a single word (tokenized by MeCab) are masked at once.
+ 
+ For the training of each model, we used a v3-8 instance of Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/).
+ 
+ ## Licenses
+ 
+ The pretrained models are distributed under the Apache License 2.0.
+ 
+ ## Acknowledgments
+ 
+ This model is trained with Cloud TPUs provided by the [TPU Research Cloud](https://sites.research.google/trc/about/) program.
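To illustrate the MeCab + WordPiece pipeline described above, a minimal fill-mask sketch via Hugging Face `transformers` (the `fugashi` and `unidic-lite` packages mentioned in the Tokenization section must be installed):

```python
# Sketch: masked-word prediction with this model through transformers.
# Requires: pip install transformers fugashi unidic-lite
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="cl-tohoku/bert-base-japanese-v3")
for pred in fill_mask("東北大学で[MASK]の研究をしています。")[:3]:
    print(pred["token_str"], round(pred["score"], 3))  # top-3 candidates
```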
bert/bert-base-japanese-v3/config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "architectures": [
+     "BertForPreTraining"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "type_vocab_size": 2,
+   "vocab_size": 32768
+ }
bert/bert-base-japanese-v3/tokenizer_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "tokenizer_class": "BertJapaneseTokenizer",
+   "model_max_length": 512,
+   "do_lower_case": false,
+   "word_tokenizer_type": "mecab",
+   "subword_tokenizer_type": "wordpiece",
+   "mecab_kwargs": {
+     "mecab_dic": "unidic_lite"
+   }
+ }
bert/bert-base-japanese-v3/vocab.txt ADDED
The diff for this file is too large to render.
bert/chinese-roberta-wwm-ext-large/.gitattributes ADDED
@@ -0,0 +1,9 @@
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
bert/chinese-roberta-wwm-ext-large/.gitignore ADDED
@@ -0,0 +1 @@
+ *.bin
bert/chinese-roberta-wwm-ext-large/README.md ADDED
@@ -0,0 +1,57 @@
+ ---
+ language:
+ - zh
+ tags:
+ - bert
+ license: "apache-2.0"
+ ---
+ 
+ # Please use 'Bert' related functions to load this model!
+ 
+ ## Chinese BERT with Whole Word Masking
+ To further accelerate Chinese natural language processing, we provide the **Chinese pre-trained BERT with Whole Word Masking**.
+ 
+ **[Pre-Training with Whole Word Masking for Chinese BERT](https://arxiv.org/abs/1906.08101)**
+ Yiming Cui, Wanxiang Che, Ting Liu, Bing Qin, Ziqing Yang, Shijin Wang, Guoping Hu
+ 
+ This repository is developed based on https://github.com/google-research/bert
+ 
+ You may also be interested in:
+ - Chinese BERT series: https://github.com/ymcui/Chinese-BERT-wwm
+ - Chinese MacBERT: https://github.com/ymcui/MacBERT
+ - Chinese ELECTRA: https://github.com/ymcui/Chinese-ELECTRA
+ - Chinese XLNet: https://github.com/ymcui/Chinese-XLNet
+ - Knowledge Distillation Toolkit - TextBrewer: https://github.com/airaria/TextBrewer
+ 
+ More resources by HFL: https://github.com/ymcui/HFL-Anthology
+ 
+ ## Citation
+ If you find the technical reports or resources useful, please cite the following technical reports in your paper.
+ - Primary: https://arxiv.org/abs/2004.13922
+ ```
+ @inproceedings{cui-etal-2020-revisiting,
+     title = "Revisiting Pre-Trained Models for {C}hinese Natural Language Processing",
+     author = "Cui, Yiming and
+       Che, Wanxiang and
+       Liu, Ting and
+       Qin, Bing and
+       Wang, Shijin and
+       Hu, Guoping",
+     booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings",
+     month = nov,
+     year = "2020",
+     address = "Online",
+     publisher = "Association for Computational Linguistics",
+     url = "https://www.aclweb.org/anthology/2020.findings-emnlp.58",
+     pages = "657--668",
+ }
+ ```
+ - Secondary: https://arxiv.org/abs/1906.08101
+ ```
+ @article{chinese-bert-wwm,
+     title={Pre-Training with Whole Word Masking for Chinese BERT},
+     author={Cui, Yiming and Che, Wanxiang and Liu, Ting and Qin, Bing and Yang, Ziqing and Wang, Shijin and Hu, Guoping},
+     journal={arXiv preprint arXiv:1906.08101},
+     year={2019}
+ }
+ ```
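Following the note above that 'Bert'-related classes must be used, a minimal loading sketch (the hub id `hfl/chinese-roberta-wwm-ext-large` is an assumption based on the HFL links above):

```python
# Sketch: load with Bert* classes, as the README instructs; the hub id is an
# assumption based on the HFL project links.
from transformers import BertTokenizer, BertModel

tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext-large")
model = BertModel.from_pretrained("hfl/chinese-roberta-wwm-ext-large")
inputs = tokenizer("使用整词掩码的中文预训练模型", return_tensors="pt")
hidden = model(**inputs).last_hidden_state  # [1, seq_len, 1024]
```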
bert/chinese-roberta-wwm-ext-large/added_tokens.json ADDED
@@ -0,0 +1 @@
+ {}
bert/chinese-roberta-wwm-ext-large/config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "architectures": [
+     "BertForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "directionality": "bidi",
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "type_vocab_size": 2,
+   "vocab_size": 21128
+ }
bert/chinese-roberta-wwm-ext-large/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
bert/chinese-roberta-wwm-ext-large/tokenizer.json ADDED
The diff for this file is too large to render.
bert/chinese-roberta-wwm-ext-large/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"init_inputs": []}
bert/chinese-roberta-wwm-ext-large/vocab.txt ADDED
The diff for this file is too large to render.
bert_gen.py ADDED
@@ -0,0 +1,59 @@
+ import torch
+ from multiprocessing import Pool
+ import commons
+ import utils
+ from tqdm import tqdm
+ from text import cleaned_text_to_sequence, get_bert
+ import argparse
+ import torch.multiprocessing as mp
+ 
+ 
+ def process_line(line):
+     rank = mp.current_process()._identity
+     rank = rank[0] if len(rank) > 0 else 0
+     if torch.cuda.is_available():
+         gpu_id = rank % torch.cuda.device_count()
+         device = torch.device(f"cuda:{gpu_id}")
+     else:
+         device = torch.device("cpu")  # fall back to CPU when no GPU is available
+     wav_path, _, language_str, text, phones, tone, word2ph = line.strip().split("|")
+     phone = phones.split(" ")
+     tone = [int(i) for i in tone.split(" ")]
+     word2ph = [int(i) for i in word2ph.split(" ")]
+     phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
+ 
+     phone = commons.intersperse(phone, 0)
+     tone = commons.intersperse(tone, 0)
+     language = commons.intersperse(language, 0)
+     for i in range(len(word2ph)):
+         word2ph[i] = word2ph[i] * 2
+     word2ph[0] += 1
+ 
+     bert_path = wav_path.replace(".wav", ".bert.pt")
+ 
+     try:
+         bert = torch.load(bert_path)
+         assert bert.shape[-1] == len(phone)
+     except Exception:
+         bert = get_bert(text, word2ph, language_str, device)
+         assert bert.shape[-1] == len(phone)
+         torch.save(bert, bert_path)
+ 
+ 
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("-c", "--config", type=str, default="configs/config.json")
+     parser.add_argument("--num_processes", type=int, default=2)
+     args = parser.parse_args()
+     config_path = args.config
+     hps = utils.get_hparams_from_file(config_path)
+     lines = []
+     with open(hps.data.training_files, encoding="utf-8") as f:
+         lines.extend(f.readlines())
+ 
+     with open(hps.data.validation_files, encoding="utf-8") as f:
+         lines.extend(f.readlines())
+ 
+     num_processes = args.num_processes
+     with Pool(processes=num_processes) as pool:
+         for _ in tqdm(pool.imap_unordered(process_line, lines), total=len(lines)):
+             pass
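For reference, `process_line` expects seven `|`-separated fields per filelist row; a sketch with made-up values (field contents are illustrative only):

```python
# Hypothetical filelist row in the order process_line() splits it:
# wav_path|speaker|language|text|phones|tones|word2ph
line = "wavs/0001.wav|Speaker0|JP|おはよう|o h a y o o|0 0 0 0 0 0|2 1 2 1"
wav_path, spk, language_str, text, phones, tone, word2ph = line.strip().split("|")
```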
commons.py ADDED
@@ -0,0 +1,160 @@
+ import math
+ import torch
+ from torch.nn import functional as F
+ 
+ 
+ def init_weights(m, mean=0.0, std=0.01):
+     classname = m.__class__.__name__
+     if classname.find("Conv") != -1:
+         m.weight.data.normal_(mean, std)
+ 
+ 
+ def get_padding(kernel_size, dilation=1):
+     return int((kernel_size * dilation - dilation) / 2)
+ 
+ 
+ def convert_pad_shape(pad_shape):
+     layer = pad_shape[::-1]
+     pad_shape = [item for sublist in layer for item in sublist]
+     return pad_shape
+ 
+ 
+ def intersperse(lst, item):
+     result = [item] * (len(lst) * 2 + 1)
+     result[1::2] = lst
+     return result
+ 
+ 
+ def kl_divergence(m_p, logs_p, m_q, logs_q):
+     """KL(P||Q)"""
+     kl = (logs_q - logs_p) - 0.5
+     kl += (
+         0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
+     )
+     return kl
+ 
+ 
+ def rand_gumbel(shape):
+     """Sample from the Gumbel distribution, protect from overflows."""
+     uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
+     return -torch.log(-torch.log(uniform_samples))
+ 
+ 
+ def rand_gumbel_like(x):
+     g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
+     return g
+ 
+ 
+ def slice_segments(x, ids_str, segment_size=4):
+     ret = torch.zeros_like(x[:, :, :segment_size])
+     for i in range(x.size(0)):
+         idx_str = ids_str[i]
+         idx_end = idx_str + segment_size
+         ret[i] = x[i, :, idx_str:idx_end]
+     return ret
+ 
+ 
+ def rand_slice_segments(x, x_lengths=None, segment_size=4):
+     b, d, t = x.size()
+     if x_lengths is None:
+         x_lengths = t
+     ids_str_max = x_lengths - segment_size + 1
+     ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
+     ret = slice_segments(x, ids_str, segment_size)
+     return ret, ids_str
+ 
+ 
+ def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
+     position = torch.arange(length, dtype=torch.float)
+     num_timescales = channels // 2
+     log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
+         num_timescales - 1
+     )
+     inv_timescales = min_timescale * torch.exp(
+         torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
+     )
+     scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
+     signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
+     signal = F.pad(signal, [0, 0, 0, channels % 2])
+     signal = signal.view(1, channels, length)
+     return signal
+ 
+ 
+ def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
+     b, channels, length = x.size()
+     signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+     return x + signal.to(dtype=x.dtype, device=x.device)
+ 
+ 
+ def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
+     b, channels, length = x.size()
+     signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+     return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
+ 
+ 
+ def subsequent_mask(length):
+     mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
+     return mask
+ 
+ 
+ @torch.jit.script
+ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+     n_channels_int = n_channels[0]
+     in_act = input_a + input_b
+     t_act = torch.tanh(in_act[:, :n_channels_int, :])
+     s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+     acts = t_act * s_act
+     return acts
+ 
+ 
+ def shift_1d(x):
+     x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
+     return x
+ 
+ 
+ def sequence_mask(length, max_length=None):
+     if max_length is None:
+         max_length = length.max()
+     x = torch.arange(max_length, dtype=length.dtype, device=length.device)
+     return x.unsqueeze(0) < length.unsqueeze(1)
+ 
+ 
+ def generate_path(duration, mask):
+     """
+     duration: [b, 1, t_x]
+     mask: [b, 1, t_y, t_x]
+     """
+ 
+     b, _, t_y, t_x = mask.shape
+     cum_duration = torch.cumsum(duration, -1)
+ 
+     cum_duration_flat = cum_duration.view(b * t_x)
+     path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
+     path = path.view(b, t_x, t_y)
+     path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
+     path = path.unsqueeze(1).transpose(2, 3) * mask
+     return path
+ 
+ 
+ def clip_grad_value_(parameters, clip_value, norm_type=2):
+     if isinstance(parameters, torch.Tensor):
+         parameters = [parameters]
+     parameters = list(filter(lambda p: p.grad is not None, parameters))
+     norm_type = float(norm_type)
+     if clip_value is not None:
+         clip_value = float(clip_value)
+ 
+     total_norm = 0
+     for p in parameters:
+         param_norm = p.grad.data.norm(norm_type)
+         total_norm += param_norm.item() ** norm_type
+         if clip_value is not None:
+             p.grad.data.clamp_(min=-clip_value, max=clip_value)
+     total_norm = total_norm ** (1.0 / norm_type)
+     return total_norm
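Two tiny demos of the helpers above (`intersperse` inserts a blank token id between symbols; `sequence_mask` builds boolean padding masks); outputs shown as comments:

```python
# Minimal demos of commons helpers; outputs verified by hand.
import torch
from commons import intersperse, sequence_mask

print(intersperse([1, 2, 3], 0))  # [0, 1, 0, 2, 0, 3, 0]
print(sequence_mask(torch.tensor([2, 4]), 4))
# tensor([[ True,  True, False, False],
#         [ True,  True,  True,  True]])
```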
data_utils.py ADDED
@@ -0,0 +1,406 @@
+ import os
+ import random
+ import torch
+ import torch.utils.data
+ from tqdm import tqdm
+ from loguru import logger
+ import commons
+ from mel_processing import spectrogram_torch, mel_spectrogram_torch
+ from utils import load_wav_to_torch, load_filepaths_and_text
+ from text import cleaned_text_to_sequence, get_bert
+ 
+ """Multi speaker version"""
+ 
+ 
+ class TextAudioSpeakerLoader(torch.utils.data.Dataset):
+     """
+     1) loads audio, speaker_id, text pairs
+     2) normalizes text and converts them to sequences of integers
+     3) computes spectrograms from audio files.
+     """
+ 
+     def __init__(self, audiopaths_sid_text, hparams):
+         self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
+         self.max_wav_value = hparams.max_wav_value
+         self.sampling_rate = hparams.sampling_rate
+         self.filter_length = hparams.filter_length
+         self.hop_length = hparams.hop_length
+         self.win_length = hparams.win_length
+         self.spk_map = hparams.spk2id
+         self.hparams = hparams
+ 
+         self.use_mel_spec_posterior = getattr(
+             hparams, "use_mel_posterior_encoder", False
+         )
+         if self.use_mel_spec_posterior:
+             self.n_mel_channels = getattr(hparams, "n_mel_channels", 80)
+ 
+         self.cleaned_text = getattr(hparams, "cleaned_text", False)
+ 
+         self.add_blank = hparams.add_blank
+         self.min_text_len = getattr(hparams, "min_text_len", 1)
+         self.max_text_len = getattr(hparams, "max_text_len", 300)
+ 
+         random.seed(1234)
+         random.shuffle(self.audiopaths_sid_text)
+         self._filter()
+ 
+     def _filter(self):
+         """
+         Filter text & store spec lengths
+         """
+         # Store spectrogram lengths for Bucketing
+         # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
+         # spec_length = wav_length // hop_length
+ 
+         audiopaths_sid_text_new = []
+         lengths = []
+         skipped = 0
+         logger.info("Init dataset...")
+         for _id, spk, language, text, phones, tone, word2ph in tqdm(
+             self.audiopaths_sid_text
+         ):
+             audiopath = f"filelists/{_id}"
+             if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:
+                 phones = phones.split(" ")
+                 tone = [int(i) for i in tone.split(" ")]
+                 word2ph = [int(i) for i in word2ph.split(" ")]
+                 audiopaths_sid_text_new.append(
+                     [audiopath, spk, language, text, phones, tone, word2ph]
+                 )
+                 lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
+             else:
+                 skipped += 1
+         logger.info(
+             "skipped: "
+             + str(skipped)
+             + ", total: "
+             + str(len(self.audiopaths_sid_text))
+         )
+         self.audiopaths_sid_text = audiopaths_sid_text_new
+         self.lengths = lengths
+ 
+     def get_audio_text_speaker_pair(self, audiopath_sid_text):
+         # separate filename, speaker_id and text
+         audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text
+ 
+         bert, ja_bert, phones, tone, language = self.get_text(
+             text, word2ph, phones, tone, language, audiopath
+         )
+ 
+         spec, wav = self.get_audio(audiopath)
+         sid = torch.LongTensor([int(self.spk_map[sid])])
+         return (phones, spec, wav, sid, tone, language, bert, ja_bert)
+ 
+     def get_audio(self, filename):
+         audio, sampling_rate = load_wav_to_torch(filename)
+         if sampling_rate != self.sampling_rate:
+             raise ValueError(
+                 "{} {} SR doesn't match target {} SR".format(
+                     filename, sampling_rate, self.sampling_rate
+                 )
+             )
+         audio_norm = audio / self.max_wav_value
+         audio_norm = audio_norm.unsqueeze(0)
+         spec_filename = filename.replace(".wav", ".spec.pt")
+         if self.use_mel_spec_posterior:
+             spec_filename = spec_filename.replace(".spec.pt", ".mel.pt")
+         try:
+             spec = torch.load(spec_filename)
+         except Exception:
+             if self.use_mel_spec_posterior:
+                 spec = mel_spectrogram_torch(
+                     audio_norm,
+                     self.filter_length,
+                     self.n_mel_channels,
+                     self.sampling_rate,
+                     self.hop_length,
+                     self.win_length,
+                     self.hparams.mel_fmin,
+                     self.hparams.mel_fmax,
+                     center=False,
+                 )
+             else:
+                 spec = spectrogram_torch(
+                     audio_norm,
+                     self.filter_length,
+                     self.sampling_rate,
+                     self.hop_length,
+                     self.win_length,
+                     center=False,
+                 )
+             spec = torch.squeeze(spec, 0)
+             torch.save(spec, spec_filename)
+         return spec, audio_norm
+ 
+     def get_text(self, text, word2ph, phone, tone, language_str, wav_path):
+         phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
+         if self.add_blank:
+             phone = commons.intersperse(phone, 0)
+             tone = commons.intersperse(tone, 0)
+             language = commons.intersperse(language, 0)
+             for i in range(len(word2ph)):
+                 word2ph[i] = word2ph[i] * 2
+             word2ph[0] += 1
+         bert_path = wav_path.replace(".wav", ".bert.pt")
+         try:
+             bert = torch.load(bert_path)
+             assert bert.shape[-1] == len(phone)
+         except Exception:
+             bert = get_bert(text, word2ph, language_str)
+             torch.save(bert, bert_path)
+             assert bert.shape[-1] == len(phone), phone
+ 
+         if language_str == "ZH":
+             ja_bert = torch.zeros(768, len(phone))
+         elif language_str == "JP":
+             ja_bert = bert
+             bert = torch.zeros(1024, len(phone))
+         else:
+             bert = torch.zeros(1024, len(phone))
+             ja_bert = torch.zeros(768, len(phone))
+         assert bert.shape[-1] == len(phone), (
+             bert.shape,
+             len(phone),
+             sum(word2ph),
+             word2ph,
+             text,
+         )
+         phone = torch.LongTensor(phone)
+         tone = torch.LongTensor(tone)
+         language = torch.LongTensor(language)
+         return bert, ja_bert, phone, tone, language
+ 
+     def get_sid(self, sid):
+         sid = torch.LongTensor([int(sid)])
+         return sid
+ 
+     def __getitem__(self, index):
+         return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
+ 
+     def __len__(self):
+         return len(self.audiopaths_sid_text)
+ 
+ 
+ class TextAudioSpeakerCollate:
+     """Zero-pads model inputs and targets"""
+ 
+     def __init__(self, return_ids=False):
+         self.return_ids = return_ids
+ 
+     def __call__(self, batch):
+         """Collate's training batch from normalized text, audio and speaker identities
+         PARAMS
+         ------
+         batch: [text_normalized, spec_normalized, wav_normalized, sid]
+         """
+         # Right zero-pad all one-hot text sequences to max input length
+         _, ids_sorted_decreasing = torch.sort(
+             torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True
+         )
+ 
+         max_text_len = max([len(x[0]) for x in batch])
+         max_spec_len = max([x[1].size(1) for x in batch])
+         max_wav_len = max([x[2].size(1) for x in batch])
+ 
+         text_lengths = torch.LongTensor(len(batch))
+         spec_lengths = torch.LongTensor(len(batch))
+         wav_lengths = torch.LongTensor(len(batch))
+         sid = torch.LongTensor(len(batch))
+ 
+         text_padded = torch.LongTensor(len(batch), max_text_len)
+         tone_padded = torch.LongTensor(len(batch), max_text_len)
+         language_padded = torch.LongTensor(len(batch), max_text_len)
+         bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)
+         ja_bert_padded = torch.FloatTensor(len(batch), 768, max_text_len)
+ 
+         spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
+         wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
+         text_padded.zero_()
+         tone_padded.zero_()
+         language_padded.zero_()
+         spec_padded.zero_()
+         wav_padded.zero_()
+         bert_padded.zero_()
+         ja_bert_padded.zero_()
+         for i in range(len(ids_sorted_decreasing)):
+             row = batch[ids_sorted_decreasing[i]]
+ 
+             text = row[0]
+             text_padded[i, : text.size(0)] = text
+             text_lengths[i] = text.size(0)
+ 
+             spec = row[1]
+             spec_padded[i, :, : spec.size(1)] = spec
+             spec_lengths[i] = spec.size(1)
+ 
+             wav = row[2]
+             wav_padded[i, :, : wav.size(1)] = wav
+             wav_lengths[i] = wav.size(1)
+ 
+             sid[i] = row[3]
+ 
+             tone = row[4]
+             tone_padded[i, : tone.size(0)] = tone
+ 
+             language = row[5]
+             language_padded[i, : language.size(0)] = language
+ 
+             bert = row[6]
+             bert_padded[i, :, : bert.size(1)] = bert
+ 
+             ja_bert = row[7]
+             ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert
+ 
+         return (
+             text_padded,
+             text_lengths,
+             spec_padded,
+             spec_lengths,
+             wav_padded,
+             wav_lengths,
+             sid,
+             tone_padded,
+             language_padded,
+             bert_padded,
+             ja_bert_padded,
+         )
+ 
+ 
+ class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
+     """
+     Maintain similar input lengths in a batch.
+     Length groups are specified by boundaries.
+     Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <= b2} or {x | b2 < length(x) <= b3}.
+ 
+     It removes samples which are not included in the boundaries.
+     Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.
+     """
+ 
+     def __init__(
+         self,
+         dataset,
+         batch_size,
+         boundaries,
+         num_replicas=None,
+         rank=None,
+         shuffle=True,
+     ):
+         super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
+         self.lengths = dataset.lengths
+         self.batch_size = batch_size
+         self.boundaries = boundaries
+ 
+         self.buckets, self.num_samples_per_bucket = self._create_buckets()
+         self.total_size = sum(self.num_samples_per_bucket)
+         self.num_samples = self.total_size // self.num_replicas
+ 
+     def _create_buckets(self):
+         buckets = [[] for _ in range(len(self.boundaries) - 1)]
+         for i in range(len(self.lengths)):
+             length = self.lengths[i]
+             idx_bucket = self._bisect(length)
+             if idx_bucket != -1:
+                 buckets[idx_bucket].append(i)
+ 
+         try:
+             for i in range(len(buckets) - 1, 0, -1):
+                 if len(buckets[i]) == 0:
+                     buckets.pop(i)
+                     self.boundaries.pop(i + 1)
+             assert all(len(bucket) > 0 for bucket in buckets)
+         # When one bucket is not traversed
+         except Exception as e:
+             print("Bucket warning ", e)
+             for i in range(len(buckets) - 1, -1, -1):
+                 if len(buckets[i]) == 0:
+                     buckets.pop(i)
+                     self.boundaries.pop(i + 1)
+ 
+         num_samples_per_bucket = []
+         for i in range(len(buckets)):
+             len_bucket = len(buckets[i])
+             total_batch_size = self.num_replicas * self.batch_size
+             rem = (
+                 total_batch_size - (len_bucket % total_batch_size)
+             ) % total_batch_size
+             num_samples_per_bucket.append(len_bucket + rem)
+         return buckets, num_samples_per_bucket
+ 
+     def __iter__(self):
+         # deterministically shuffle based on epoch
+         g = torch.Generator()
+         g.manual_seed(self.epoch)
+ 
+         indices = []
+         if self.shuffle:
+             for bucket in self.buckets:
+                 indices.append(torch.randperm(len(bucket), generator=g).tolist())
+         else:
+             for bucket in self.buckets:
+                 indices.append(list(range(len(bucket))))
+ 
+         batches = []
+         for i in range(len(self.buckets)):
+             bucket = self.buckets[i]
+             len_bucket = len(bucket)
+             if len_bucket == 0:
+                 continue
+             ids_bucket = indices[i]
+             num_samples_bucket = self.num_samples_per_bucket[i]
+ 
+             # add extra samples to make it evenly divisible
+             rem = num_samples_bucket - len_bucket
+             ids_bucket = (
+                 ids_bucket
+                 + ids_bucket * (rem // len_bucket)
+                 + ids_bucket[: (rem % len_bucket)]
+             )
+ 
+             # subsample
+             ids_bucket = ids_bucket[self.rank :: self.num_replicas]
+ 
+             # batching
+             for j in range(len(ids_bucket) // self.batch_size):
+                 batch = [
+                     bucket[idx]
+                     for idx in ids_bucket[
+                         j * self.batch_size : (j + 1) * self.batch_size
+                     ]
+                 ]
+                 batches.append(batch)
+ 
+         if self.shuffle:
+             batch_ids = torch.randperm(len(batches), generator=g).tolist()
+             batches = [batches[i] for i in batch_ids]
+         self.batches = batches
+ 
+         assert len(self.batches) * self.batch_size == self.num_samples
+         return iter(self.batches)
+ 
+     def _bisect(self, x, lo=0, hi=None):
+         if hi is None:
+             hi = len(self.boundaries) - 1
+ 
+         if hi > lo:
+             mid = (hi + lo) // 2
+             if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
+                 return mid
+             elif x <= self.boundaries[mid]:
+                 return self._bisect(x, lo, mid)
+             else:
+                 return self._bisect(x, mid + 1, hi)
+         else:
+             return -1
+ 
+     def __len__(self):
+         return self.num_samples // self.batch_size
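A sketch of how these pieces are typically wired together for training (the `hps` object is assumed to come from `utils.get_hparams_from_file`; the bucket `boundaries` and batch size below are illustrative, not this repo's config):

```python
# Hypothetical single-process wiring of dataset, bucket sampler, and collate.
from torch.utils.data import DataLoader

train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
train_sampler = DistributedBucketSampler(
    train_dataset,
    batch_size=16,                                              # illustrative
    boundaries=[32, 300, 400, 500, 600, 700, 800, 900, 1000],   # illustrative
    num_replicas=1,  # explicit values avoid needing torch.distributed init
    rank=0,
    shuffle=True,
)
train_loader = DataLoader(
    train_dataset,
    num_workers=4,
    collate_fn=TextAudioSpeakerCollate(),
    batch_sampler=train_sampler,
)
```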
info.md ADDED
@@ -0,0 +1,27 @@
+ # Umamusume DeBERTA-VITS2 TTS
+
+ 👌 **Currently, ONLY Japanese is supported.** 👌
+
+ 💪 **Based on [Bert-VITS2](https://github.com/fishaudio/Bert-VITS2), this work closely follows [Akito/umamusume_bert_vits2](https://huggingface.co/spaces/AkitoP/umamusume_bert_vits2), which provides the Japanese text preprocessor.** ❤
+
+ ✋ **Please do NOT enter a really LOOOONG sentence, or several sentences, in a single row. Splitting your input across multiple rows lets each row be inferred separately (see the sketch below).** ✋
+
+ ✋ **Please do not put long text on a single row: the model treats each row's input as one sentence for inference. Putting multiple sentences on separate rows reduces inference time.** ✋
+
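As a minimal sketch of the row splitting described above (`infer_row` is a hypothetical stand-in for the app's per-row TTS call, not code from this repo):

```python
def infer_row(row: str) -> None:
    """Hypothetical stand-in for the app's per-row TTS inference."""
    print(f"synthesizing: {row}")

text = "走りたいんだ。\n今日も、これからも。"  # two rows -> two separate inferences
for row in (r.strip() for r in text.split("\n")):
    if row:
        infer_row(row)  # each row is treated as one sentence
```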
+ ## Training Details - For those who may be interested
+
+ 🎈 **This work swaps [cl-tohoku/bert-base-japanese-v3](https://huggingface.co/cl-tohoku/bert-base-japanese-v3) for [ku-nlp/deberta-v2-base-japanese](https://huggingface.co/ku-nlp/deberta-v2-base-japanese), expecting potentially better performance, and just for fun.** 🥰
+
+ ❤ Thanks to **SUSTech Center for Computational Science and Engineering**. ❤ This model is trained on A100 (40GB) x 2 with a total **batch size of 32**.
+
+ 💪 So far, this model has been trained for **1 cycle, 90K steps (= 60 epochs)**. 💪
+
+ 📕 This work uses a linear LR scheduler with warmup (7.5% of total steps) and `max_lr=1e-4`; a sketch follows below. 📕
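A minimal sketch of that schedule, together with the gradient clipping mentioned in the next line, assuming plain PyTorch; the dummy parameter, placeholder loss, and loop length are illustrative, and the actual training script may differ:

```python
import torch

total_steps = 90_000
warmup_steps = int(0.075 * total_steps)  # 7.5% of total steps, per the note above

params = [torch.nn.Parameter(torch.zeros(1))]    # stand-in for model.parameters()
optimizer = torch.optim.AdamW(params, lr=1e-4)   # max_lr = 1e-4

def lr_lambda(step: int) -> float:
    # Linear warmup to max_lr, then linear decay back toward zero.
    if step < warmup_steps:
        return step / max(1, warmup_steps)
    return max(0.0, (total_steps - step) / max(1, total_steps - warmup_steps))

scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)

for step in range(3):                            # placeholder training steps
    loss = (params[0] ** 2).sum()
    loss.backward()
    torch.nn.utils.clip_grad_value_(params, clip_value=10)  # clip gradient values to 10
    optimizer.step()
    scheduler.step()
    optimizer.zero_grad()
```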
+
+ ✂ This work clips gradient values to 10 (`clip_grad_value_` in the sketch above). ✂
+
+ ⚠ Fine-tuning the model on **single-speaker datasets separately** will definitely give better results than training one model on a huge dataset comprising many speakers; sharing a single model leads to unexpected mixing of the speakers' voice lines. ⚠
+
+ ### TODO:
+
+ 📅 Train one more cycle using the text preprocessor provided by [AkitoP](https://huggingface.co/AkitoP), which handles long tones better. 📅
losses.py ADDED
@@ -0,0 +1,58 @@
1
+ import torch
2
+
3
+
4
+ def feature_loss(fmap_r, fmap_g):
5
+ loss = 0
6
+ for dr, dg in zip(fmap_r, fmap_g):
7
+ for rl, gl in zip(dr, dg):
8
+ rl = rl.float().detach()
9
+ gl = gl.float()
10
+ loss += torch.mean(torch.abs(rl - gl))
11
+
12
+ return loss * 2
13
+
14
+
15
+ def discriminator_loss(disc_real_outputs, disc_generated_outputs):
16
+ loss = 0
17
+ r_losses = []
18
+ g_losses = []
19
+ for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
20
+ dr = dr.float()
21
+ dg = dg.float()
22
+ r_loss = torch.mean((1 - dr) ** 2)
23
+ g_loss = torch.mean(dg**2)
24
+ loss += r_loss + g_loss
25
+ r_losses.append(r_loss.item())
26
+ g_losses.append(g_loss.item())
27
+
28
+ return loss, r_losses, g_losses
29
+
30
+
31
+ def generator_loss(disc_outputs):
32
+ loss = 0
33
+ gen_losses = []
34
+ for dg in disc_outputs:
35
+ dg = dg.float()
36
+ l = torch.mean((1 - dg) ** 2)
37
+ gen_losses.append(l)
38
+ loss += l
39
+
40
+ return loss, gen_losses
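As a reference note (my reading of the code, not repo documentation): `feature_loss` is L1 feature matching scaled by 2, while `discriminator_loss` and `generator_loss` above implement the least-squares GAN objectives, with D a discriminator output and G(z) a generated sample:

```latex
\mathcal{L}_D = \mathbb{E}_x\big[(1 - D(x))^2\big] + \mathbb{E}_z\big[D(G(z))^2\big],
\qquad
\mathcal{L}_G = \mathbb{E}_z\big[(1 - D(G(z)))^2\big]
```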
41
+
42
+
43
+ def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
44
+ """
45
+ z_p, logs_q: [b, h, t_t]
46
+ m_p, logs_p: [b, h, t_t]
47
+ """
48
+ z_p = z_p.float()
49
+ logs_q = logs_q.float()
50
+ m_p = m_p.float()
51
+ logs_p = logs_p.float()
52
+ z_mask = z_mask.float()
53
+
54
+ kl = logs_p - logs_q - 0.5
55
+ kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)
56
+ kl = torch.sum(kl * z_mask)
57
+ l = kl / torch.sum(z_mask)
58
+ return l
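And a short note on what `kl_loss` computes (again my reading of the code above): with diagonal Gaussians q = N(m_q, e^{2 logs_q}) and p = N(m_p, e^{2 logs_p}) and one sample z_p drawn from q, the per-element term is a single-sample estimate of KL(q || p), and the masked sum divided by the mask sum averages it over valid frames:

```latex
\mathrm{KL}(q \,\Vert\, p) \;\approx\; \mathrm{logs}_p - \mathrm{logs}_q - \tfrac{1}{2}
\;+\; \tfrac{1}{2}\,(z_p - m_p)^2\, e^{-2\,\mathrm{logs}_p}
```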
mel_processing.py ADDED
@@ -0,0 +1,139 @@
1
+ import torch
2
+ import torch.utils.data
3
+ from librosa.filters import mel as librosa_mel_fn
4
+
5
+ MAX_WAV_VALUE = 32768.0
6
+
7
+
8
+ def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
9
+ """
10
+ PARAMS
11
+ ------
12
+ C: compression factor
13
+ """
14
+ return torch.log(torch.clamp(x, min=clip_val) * C)
15
+
16
+
17
+ def dynamic_range_decompression_torch(x, C=1):
18
+ """
19
+ PARAMS
20
+ ------
21
+ C: compression factor used to compress
22
+ """
23
+ return torch.exp(x) / C
24
+
25
+
26
+ def spectral_normalize_torch(magnitudes):
27
+ output = dynamic_range_compression_torch(magnitudes)
28
+ return output
29
+
30
+
31
+ def spectral_de_normalize_torch(magnitudes):
32
+ output = dynamic_range_decompression_torch(magnitudes)
33
+ return output
34
+
35
+
36
+ mel_basis = {}
37
+ hann_window = {}
38
+
39
+
40
+ def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
41
+ if torch.min(y) < -1.0:
42
+ print("min value is ", torch.min(y))
43
+ if torch.max(y) > 1.0:
44
+ print("max value is ", torch.max(y))
45
+
46
+ global hann_window
47
+ dtype_device = str(y.dtype) + "_" + str(y.device)
48
+ wnsize_dtype_device = str(win_size) + "_" + dtype_device
49
+ if wnsize_dtype_device not in hann_window:
50
+ hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(
51
+ dtype=y.dtype, device=y.device
52
+ )
53
+
54
+ y = torch.nn.functional.pad(
55
+ y.unsqueeze(1),
56
+ (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
57
+ mode="reflect",
58
+ )
59
+ y = y.squeeze(1)
60
+
61
+ spec = torch.stft(
62
+ y,
63
+ n_fft,
64
+ hop_length=hop_size,
65
+ win_length=win_size,
66
+ window=hann_window[wnsize_dtype_device],
67
+ center=center,
68
+ pad_mode="reflect",
69
+ normalized=False,
70
+ onesided=True,
71
+ return_complex=False,
72
+ )
73
+
74
+ spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
75
+ return spec
76
+
77
+
78
+ def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
79
+ global mel_basis
80
+ dtype_device = str(spec.dtype) + "_" + str(spec.device)
81
+ fmax_dtype_device = str(fmax) + "_" + dtype_device
82
+ if fmax_dtype_device not in mel_basis:
83
+        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)  # keyword args: mel() is keyword-only in librosa >= 0.10
84
+ mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(
85
+ dtype=spec.dtype, device=spec.device
86
+ )
87
+ spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
88
+ spec = spectral_normalize_torch(spec)
89
+ return spec
90
+
91
+
92
+ def mel_spectrogram_torch(
93
+ y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False
94
+ ):
95
+ if torch.min(y) < -1.0:
96
+ print("min value is ", torch.min(y))
97
+ if torch.max(y) > 1.0:
98
+ print("max value is ", torch.max(y))
99
+
100
+ global mel_basis, hann_window
101
+ dtype_device = str(y.dtype) + "_" + str(y.device)
102
+ fmax_dtype_device = str(fmax) + "_" + dtype_device
103
+ wnsize_dtype_device = str(win_size) + "_" + dtype_device
104
+ if fmax_dtype_device not in mel_basis:
105
+        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)  # keyword args: mel() is keyword-only in librosa >= 0.10
106
+ mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(
107
+ dtype=y.dtype, device=y.device
108
+ )
109
+ if wnsize_dtype_device not in hann_window:
110
+ hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(
111
+ dtype=y.dtype, device=y.device
112
+ )
113
+
114
+ y = torch.nn.functional.pad(
115
+ y.unsqueeze(1),
116
+ (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
117
+ mode="reflect",
118
+ )
119
+ y = y.squeeze(1)
120
+
121
+ spec = torch.stft(
122
+ y,
123
+ n_fft,
124
+ hop_length=hop_size,
125
+ win_length=win_size,
126
+ window=hann_window[wnsize_dtype_device],
127
+ center=center,
128
+ pad_mode="reflect",
129
+ normalized=False,
130
+ onesided=True,
131
+ return_complex=False,
132
+ )
133
+
134
+ spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
135
+
136
+ spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
137
+ spec = spectral_normalize_torch(spec)
138
+
139
+ return spec
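A minimal sketch of calling `mel_spectrogram_torch` with the hyperparameters this repo uses in `models/config.json` (the random waveform is a placeholder; `librosa` must be installed for the mel filter bank):

```python
import torch

y = torch.empty(1, 44100).uniform_(-1, 1)  # one second of placeholder audio at 44.1 kHz
mel = mel_spectrogram_torch(
    y,
    n_fft=2048,           # filter_length
    num_mels=128,         # n_mel_channels
    sampling_rate=44100,
    hop_size=512,
    win_size=2048,
    fmin=0.0,
    fmax=None,            # None -> Nyquist in librosa's mel filter bank
    center=False,
)
print(mel.shape)  # torch.Size([1, 128, 86]); about 44100 / 512 frames
```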
models.py ADDED
@@ -0,0 +1,986 @@
1
+ import math
2
+ import torch
3
+ from torch import nn
4
+ from torch.nn import functional as F
5
+
6
+ import commons
7
+ import modules
8
+ import attentions
9
+ import monotonic_align
10
+
11
+ from torch.nn import Conv1d, ConvTranspose1d, Conv2d
12
+ from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
13
+
14
+ from commons import init_weights, get_padding
15
+ from text import symbols, num_tones, num_languages
16
+
17
+
18
+ class DurationDiscriminator(nn.Module): # vits2
19
+ def __init__(
20
+ self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0
21
+ ):
22
+ super().__init__()
23
+
24
+ self.in_channels = in_channels
25
+ self.filter_channels = filter_channels
26
+ self.kernel_size = kernel_size
27
+ self.p_dropout = p_dropout
28
+ self.gin_channels = gin_channels
29
+
30
+ self.drop = nn.Dropout(p_dropout)
31
+ self.conv_1 = nn.Conv1d(
32
+ in_channels, filter_channels, kernel_size, padding=kernel_size // 2
33
+ )
34
+ self.norm_1 = modules.LayerNorm(filter_channels)
35
+ self.conv_2 = nn.Conv1d(
36
+ filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
37
+ )
38
+ self.norm_2 = modules.LayerNorm(filter_channels)
39
+ self.dur_proj = nn.Conv1d(1, filter_channels, 1)
40
+
41
+ self.pre_out_conv_1 = nn.Conv1d(
42
+ 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
43
+ )
44
+ self.pre_out_norm_1 = modules.LayerNorm(filter_channels)
45
+ self.pre_out_conv_2 = nn.Conv1d(
46
+ filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
47
+ )
48
+ self.pre_out_norm_2 = modules.LayerNorm(filter_channels)
49
+
50
+ if gin_channels != 0:
51
+ self.cond = nn.Conv1d(gin_channels, in_channels, 1)
52
+
53
+ self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())
54
+
55
+ def forward_probability(self, x, x_mask, dur, g=None):
56
+ dur = self.dur_proj(dur)
57
+ x = torch.cat([x, dur], dim=1)
58
+ x = self.pre_out_conv_1(x * x_mask)
59
+ x = torch.relu(x)
60
+ x = self.pre_out_norm_1(x)
61
+ x = self.drop(x)
62
+ x = self.pre_out_conv_2(x * x_mask)
63
+ x = torch.relu(x)
64
+ x = self.pre_out_norm_2(x)
65
+ x = self.drop(x)
66
+ x = x * x_mask
67
+ x = x.transpose(1, 2)
68
+ output_prob = self.output_layer(x)
69
+ return output_prob
70
+
71
+ def forward(self, x, x_mask, dur_r, dur_hat, g=None):
72
+ x = torch.detach(x)
73
+ if g is not None:
74
+ g = torch.detach(g)
75
+ x = x + self.cond(g)
76
+ x = self.conv_1(x * x_mask)
77
+ x = torch.relu(x)
78
+ x = self.norm_1(x)
79
+ x = self.drop(x)
80
+ x = self.conv_2(x * x_mask)
81
+ x = torch.relu(x)
82
+ x = self.norm_2(x)
83
+ x = self.drop(x)
84
+
85
+ output_probs = []
86
+ for dur in [dur_r, dur_hat]:
87
+ output_prob = self.forward_probability(x, x_mask, dur, g)
88
+ output_probs.append(output_prob)
89
+
90
+ return output_probs
91
+
92
+
93
+ class TransformerCouplingBlock(nn.Module):
94
+ def __init__(
95
+ self,
96
+ channels,
97
+ hidden_channels,
98
+ filter_channels,
99
+ n_heads,
100
+ n_layers,
101
+ kernel_size,
102
+ p_dropout,
103
+ n_flows=4,
104
+ gin_channels=0,
105
+ share_parameter=False,
106
+ ):
107
+ super().__init__()
108
+ self.channels = channels
109
+ self.hidden_channels = hidden_channels
110
+ self.kernel_size = kernel_size
111
+ self.n_layers = n_layers
112
+ self.n_flows = n_flows
113
+ self.gin_channels = gin_channels
114
+
115
+ self.flows = nn.ModuleList()
116
+
117
+ self.wn = (
118
+ attentions.FFT(
119
+ hidden_channels,
120
+ filter_channels,
121
+ n_heads,
122
+ n_layers,
123
+ kernel_size,
124
+ p_dropout,
125
+ isflow=True,
126
+ gin_channels=self.gin_channels,
127
+ )
128
+ if share_parameter
129
+ else None
130
+ )
131
+
132
+ for i in range(n_flows):
133
+ self.flows.append(
134
+ modules.TransformerCouplingLayer(
135
+ channels,
136
+ hidden_channels,
137
+ kernel_size,
138
+ n_layers,
139
+ n_heads,
140
+ p_dropout,
141
+ filter_channels,
142
+ mean_only=True,
143
+ wn_sharing_parameter=self.wn,
144
+ gin_channels=self.gin_channels,
145
+ )
146
+ )
147
+ self.flows.append(modules.Flip())
148
+
149
+ def forward(self, x, x_mask, g=None, reverse=False):
150
+ if not reverse:
151
+ for flow in self.flows:
152
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
153
+ else:
154
+ for flow in reversed(self.flows):
155
+ x = flow(x, x_mask, g=g, reverse=reverse)
156
+ return x
157
+
158
+
159
+ class StochasticDurationPredictor(nn.Module):
160
+ def __init__(
161
+ self,
162
+ in_channels,
163
+ filter_channels,
164
+ kernel_size,
165
+ p_dropout,
166
+ n_flows=4,
167
+ gin_channels=0,
168
+ ):
169
+ super().__init__()
170
+        filter_channels = in_channels  # TODO: this override should be removed in a future version
171
+ self.in_channels = in_channels
172
+ self.filter_channels = filter_channels
173
+ self.kernel_size = kernel_size
174
+ self.p_dropout = p_dropout
175
+ self.n_flows = n_flows
176
+ self.gin_channels = gin_channels
177
+
178
+ self.log_flow = modules.Log()
179
+ self.flows = nn.ModuleList()
180
+ self.flows.append(modules.ElementwiseAffine(2))
181
+ for i in range(n_flows):
182
+ self.flows.append(
183
+ modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)
184
+ )
185
+ self.flows.append(modules.Flip())
186
+
187
+ self.post_pre = nn.Conv1d(1, filter_channels, 1)
188
+ self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
189
+ self.post_convs = modules.DDSConv(
190
+ filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout
191
+ )
192
+ self.post_flows = nn.ModuleList()
193
+ self.post_flows.append(modules.ElementwiseAffine(2))
194
+ for i in range(4):
195
+ self.post_flows.append(
196
+ modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)
197
+ )
198
+ self.post_flows.append(modules.Flip())
199
+
200
+ self.pre = nn.Conv1d(in_channels, filter_channels, 1)
201
+ self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
202
+ self.convs = modules.DDSConv(
203
+ filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout
204
+ )
205
+ if gin_channels != 0:
206
+ self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
207
+
208
+ def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
209
+ x = torch.detach(x)
210
+ x = self.pre(x)
211
+ if g is not None:
212
+ g = torch.detach(g)
213
+ x = x + self.cond(g)
214
+ x = self.convs(x, x_mask)
215
+ x = self.proj(x) * x_mask
216
+
217
+ if not reverse:
218
+ flows = self.flows
219
+ assert w is not None
220
+
221
+ logdet_tot_q = 0
222
+ h_w = self.post_pre(w)
223
+ h_w = self.post_convs(h_w, x_mask)
224
+ h_w = self.post_proj(h_w) * x_mask
225
+ e_q = (
226
+ torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype)
227
+ * x_mask
228
+ )
229
+ z_q = e_q
230
+ for flow in self.post_flows:
231
+ z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
232
+ logdet_tot_q += logdet_q
233
+ z_u, z1 = torch.split(z_q, [1, 1], 1)
234
+ u = torch.sigmoid(z_u) * x_mask
235
+ z0 = (w - u) * x_mask
236
+ logdet_tot_q += torch.sum(
237
+ (F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]
238
+ )
239
+ logq = (
240
+ torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q**2)) * x_mask, [1, 2])
241
+ - logdet_tot_q
242
+ )
243
+
244
+ logdet_tot = 0
245
+ z0, logdet = self.log_flow(z0, x_mask)
246
+ logdet_tot += logdet
247
+ z = torch.cat([z0, z1], 1)
248
+ for flow in flows:
249
+ z, logdet = flow(z, x_mask, g=x, reverse=reverse)
250
+ logdet_tot = logdet_tot + logdet
251
+ nll = (
252
+ torch.sum(0.5 * (math.log(2 * math.pi) + (z**2)) * x_mask, [1, 2])
253
+ - logdet_tot
254
+ )
255
+ return nll + logq # [b]
256
+ else:
257
+ flows = list(reversed(self.flows))
258
+ flows = flows[:-2] + [flows[-1]] # remove a useless vflow
259
+ z = (
260
+ torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype)
261
+ * noise_scale
262
+ )
263
+ for flow in flows:
264
+ z = flow(z, x_mask, g=x, reverse=reverse)
265
+ z0, z1 = torch.split(z, [1, 1], 1)
266
+ logw = z0
267
+ return logw
268
+
269
+
270
+ class DurationPredictor(nn.Module):
271
+ def __init__(
272
+ self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0
273
+ ):
274
+ super().__init__()
275
+
276
+ self.in_channels = in_channels
277
+ self.filter_channels = filter_channels
278
+ self.kernel_size = kernel_size
279
+ self.p_dropout = p_dropout
280
+ self.gin_channels = gin_channels
281
+
282
+ self.drop = nn.Dropout(p_dropout)
283
+ self.conv_1 = nn.Conv1d(
284
+ in_channels, filter_channels, kernel_size, padding=kernel_size // 2
285
+ )
286
+ self.norm_1 = modules.LayerNorm(filter_channels)
287
+ self.conv_2 = nn.Conv1d(
288
+ filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
289
+ )
290
+ self.norm_2 = modules.LayerNorm(filter_channels)
291
+ self.proj = nn.Conv1d(filter_channels, 1, 1)
292
+
293
+ if gin_channels != 0:
294
+ self.cond = nn.Conv1d(gin_channels, in_channels, 1)
295
+
296
+ def forward(self, x, x_mask, g=None):
297
+ x = torch.detach(x)
298
+ if g is not None:
299
+ g = torch.detach(g)
300
+ x = x + self.cond(g)
301
+ x = self.conv_1(x * x_mask)
302
+ x = torch.relu(x)
303
+ x = self.norm_1(x)
304
+ x = self.drop(x)
305
+ x = self.conv_2(x * x_mask)
306
+ x = torch.relu(x)
307
+ x = self.norm_2(x)
308
+ x = self.drop(x)
309
+ x = self.proj(x * x_mask)
310
+ return x * x_mask
311
+
312
+
313
+ class TextEncoder(nn.Module):
314
+ def __init__(
315
+ self,
316
+ n_vocab,
317
+ out_channels,
318
+ hidden_channels,
319
+ filter_channels,
320
+ n_heads,
321
+ n_layers,
322
+ kernel_size,
323
+ p_dropout,
324
+ gin_channels=0,
325
+ ):
326
+ super().__init__()
327
+ self.n_vocab = n_vocab
328
+ self.out_channels = out_channels
329
+ self.hidden_channels = hidden_channels
330
+ self.filter_channels = filter_channels
331
+ self.n_heads = n_heads
332
+ self.n_layers = n_layers
333
+ self.kernel_size = kernel_size
334
+ self.p_dropout = p_dropout
335
+ self.gin_channels = gin_channels
336
+ self.emb = nn.Embedding(len(symbols), hidden_channels)
337
+ nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
338
+ self.tone_emb = nn.Embedding(num_tones, hidden_channels)
339
+ nn.init.normal_(self.tone_emb.weight, 0.0, hidden_channels**-0.5)
340
+ self.language_emb = nn.Embedding(num_languages, hidden_channels)
341
+ nn.init.normal_(self.language_emb.weight, 0.0, hidden_channels**-0.5)
342
+ self.bert_proj = nn.Conv1d(1024, hidden_channels, 1)
343
+ self.ja_bert_proj = nn.Conv1d(768, hidden_channels, 1)
344
+
345
+ self.encoder = attentions.Encoder(
346
+ hidden_channels,
347
+ filter_channels,
348
+ n_heads,
349
+ n_layers,
350
+ kernel_size,
351
+ p_dropout,
352
+ gin_channels=self.gin_channels,
353
+ )
354
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
355
+
356
+ def forward(self, x, x_lengths, tone, language, bert, ja_bert, g=None):
357
+ bert_emb = self.bert_proj(bert).transpose(1, 2)
358
+ ja_bert_emb = self.ja_bert_proj(ja_bert).transpose(1, 2)
359
+ x = (
360
+ self.emb(x)
361
+ + self.tone_emb(tone)
362
+ + self.language_emb(language)
363
+ + bert_emb
364
+ + ja_bert_emb
365
+ ) * math.sqrt(
366
+ self.hidden_channels
367
+ ) # [b, t, h]
368
+ x = torch.transpose(x, 1, -1) # [b, h, t]
369
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
370
+ x.dtype
371
+ )
372
+
373
+ x = self.encoder(x * x_mask, x_mask, g=g)
374
+ stats = self.proj(x) * x_mask
375
+
376
+ m, logs = torch.split(stats, self.out_channels, dim=1)
377
+ return x, m, logs, x_mask
378
+
379
+
380
+ class ResidualCouplingBlock(nn.Module):
381
+ def __init__(
382
+ self,
383
+ channels,
384
+ hidden_channels,
385
+ kernel_size,
386
+ dilation_rate,
387
+ n_layers,
388
+ n_flows=4,
389
+ gin_channels=0,
390
+ ):
391
+ super().__init__()
392
+ self.channels = channels
393
+ self.hidden_channels = hidden_channels
394
+ self.kernel_size = kernel_size
395
+ self.dilation_rate = dilation_rate
396
+ self.n_layers = n_layers
397
+ self.n_flows = n_flows
398
+ self.gin_channels = gin_channels
399
+
400
+ self.flows = nn.ModuleList()
401
+ for i in range(n_flows):
402
+ self.flows.append(
403
+ modules.ResidualCouplingLayer(
404
+ channels,
405
+ hidden_channels,
406
+ kernel_size,
407
+ dilation_rate,
408
+ n_layers,
409
+ gin_channels=gin_channels,
410
+ mean_only=True,
411
+ )
412
+ )
413
+ self.flows.append(modules.Flip())
414
+
415
+ def forward(self, x, x_mask, g=None, reverse=False):
416
+ if not reverse:
417
+ for flow in self.flows:
418
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
419
+ else:
420
+ for flow in reversed(self.flows):
421
+ x = flow(x, x_mask, g=g, reverse=reverse)
422
+ return x
423
+
424
+
425
+ class PosteriorEncoder(nn.Module):
426
+ def __init__(
427
+ self,
428
+ in_channels,
429
+ out_channels,
430
+ hidden_channels,
431
+ kernel_size,
432
+ dilation_rate,
433
+ n_layers,
434
+ gin_channels=0,
435
+ ):
436
+ super().__init__()
437
+ self.in_channels = in_channels
438
+ self.out_channels = out_channels
439
+ self.hidden_channels = hidden_channels
440
+ self.kernel_size = kernel_size
441
+ self.dilation_rate = dilation_rate
442
+ self.n_layers = n_layers
443
+ self.gin_channels = gin_channels
444
+
445
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
446
+ self.enc = modules.WN(
447
+ hidden_channels,
448
+ kernel_size,
449
+ dilation_rate,
450
+ n_layers,
451
+ gin_channels=gin_channels,
452
+ )
453
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
454
+
455
+ def forward(self, x, x_lengths, g=None):
456
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
457
+ x.dtype
458
+ )
459
+ x = self.pre(x) * x_mask
460
+ x = self.enc(x, x_mask, g=g)
461
+ stats = self.proj(x) * x_mask
462
+ m, logs = torch.split(stats, self.out_channels, dim=1)
463
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
464
+ return z, m, logs, x_mask
465
+
466
+
467
+ class Generator(torch.nn.Module):
468
+ def __init__(
469
+ self,
470
+ initial_channel,
471
+ resblock,
472
+ resblock_kernel_sizes,
473
+ resblock_dilation_sizes,
474
+ upsample_rates,
475
+ upsample_initial_channel,
476
+ upsample_kernel_sizes,
477
+ gin_channels=0,
478
+ ):
479
+ super(Generator, self).__init__()
480
+ self.num_kernels = len(resblock_kernel_sizes)
481
+ self.num_upsamples = len(upsample_rates)
482
+ self.conv_pre = Conv1d(
483
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
484
+ )
485
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
486
+
487
+ self.ups = nn.ModuleList()
488
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
489
+ self.ups.append(
490
+ weight_norm(
491
+ ConvTranspose1d(
492
+ upsample_initial_channel // (2**i),
493
+ upsample_initial_channel // (2 ** (i + 1)),
494
+ k,
495
+ u,
496
+ padding=(k - u) // 2,
497
+ )
498
+ )
499
+ )
500
+
501
+ self.resblocks = nn.ModuleList()
502
+ for i in range(len(self.ups)):
503
+ ch = upsample_initial_channel // (2 ** (i + 1))
504
+ for j, (k, d) in enumerate(
505
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
506
+ ):
507
+ self.resblocks.append(resblock(ch, k, d))
508
+
509
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
510
+ self.ups.apply(init_weights)
511
+
512
+ if gin_channels != 0:
513
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
514
+
515
+ def forward(self, x, g=None):
516
+ x = self.conv_pre(x)
517
+ if g is not None:
518
+ x = x + self.cond(g)
519
+
520
+ for i in range(self.num_upsamples):
521
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
522
+ x = self.ups[i](x)
523
+ xs = None
524
+ for j in range(self.num_kernels):
525
+ if xs is None:
526
+ xs = self.resblocks[i * self.num_kernels + j](x)
527
+ else:
528
+ xs += self.resblocks[i * self.num_kernels + j](x)
529
+ x = xs / self.num_kernels
530
+ x = F.leaky_relu(x)
531
+ x = self.conv_post(x)
532
+ x = torch.tanh(x)
533
+
534
+ return x
535
+
536
+ def remove_weight_norm(self):
537
+ print("Removing weight norm...")
538
+ for layer in self.ups:
539
+ remove_weight_norm(layer)
540
+ for layer in self.resblocks:
541
+ layer.remove_weight_norm()
542
+
543
+
544
+ class DiscriminatorP(torch.nn.Module):
545
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
546
+ super(DiscriminatorP, self).__init__()
547
+ self.period = period
548
+ self.use_spectral_norm = use_spectral_norm
549
+ norm_f = weight_norm if use_spectral_norm is False else spectral_norm
550
+ self.convs = nn.ModuleList(
551
+ [
552
+ norm_f(
553
+ Conv2d(
554
+ 1,
555
+ 32,
556
+ (kernel_size, 1),
557
+ (stride, 1),
558
+ padding=(get_padding(kernel_size, 1), 0),
559
+ )
560
+ ),
561
+ norm_f(
562
+ Conv2d(
563
+ 32,
564
+ 128,
565
+ (kernel_size, 1),
566
+ (stride, 1),
567
+ padding=(get_padding(kernel_size, 1), 0),
568
+ )
569
+ ),
570
+ norm_f(
571
+ Conv2d(
572
+ 128,
573
+ 512,
574
+ (kernel_size, 1),
575
+ (stride, 1),
576
+ padding=(get_padding(kernel_size, 1), 0),
577
+ )
578
+ ),
579
+ norm_f(
580
+ Conv2d(
581
+ 512,
582
+ 1024,
583
+ (kernel_size, 1),
584
+ (stride, 1),
585
+ padding=(get_padding(kernel_size, 1), 0),
586
+ )
587
+ ),
588
+ norm_f(
589
+ Conv2d(
590
+ 1024,
591
+ 1024,
592
+ (kernel_size, 1),
593
+ 1,
594
+ padding=(get_padding(kernel_size, 1), 0),
595
+ )
596
+ ),
597
+ ]
598
+ )
599
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
600
+
601
+ def forward(self, x):
602
+ fmap = []
603
+
604
+ # 1d to 2d
605
+ b, c, t = x.shape
606
+ if t % self.period != 0: # pad first
607
+ n_pad = self.period - (t % self.period)
608
+ x = F.pad(x, (0, n_pad), "reflect")
609
+ t = t + n_pad
610
+ x = x.view(b, c, t // self.period, self.period)
611
+
612
+ for layer in self.convs:
613
+ x = layer(x)
614
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
615
+ fmap.append(x)
616
+ x = self.conv_post(x)
617
+ fmap.append(x)
618
+ x = torch.flatten(x, 1, -1)
619
+
620
+ return x, fmap
621
+
622
+
623
+ class DiscriminatorS(torch.nn.Module):
624
+ def __init__(self, use_spectral_norm=False):
625
+ super(DiscriminatorS, self).__init__()
626
+ norm_f = weight_norm if use_spectral_norm is False else spectral_norm
627
+ self.convs = nn.ModuleList(
628
+ [
629
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
630
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
631
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
632
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
633
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
634
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
635
+ ]
636
+ )
637
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
638
+
639
+ def forward(self, x):
640
+ fmap = []
641
+
642
+ for layer in self.convs:
643
+ x = layer(x)
644
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
645
+ fmap.append(x)
646
+ x = self.conv_post(x)
647
+ fmap.append(x)
648
+ x = torch.flatten(x, 1, -1)
649
+
650
+ return x, fmap
651
+
652
+
653
+ class MultiPeriodDiscriminator(torch.nn.Module):
654
+ def __init__(self, use_spectral_norm=False):
655
+ super(MultiPeriodDiscriminator, self).__init__()
656
+ periods = [2, 3, 5, 7, 11]
657
+
658
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
659
+ discs = discs + [
660
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
661
+ ]
662
+ self.discriminators = nn.ModuleList(discs)
663
+
664
+ def forward(self, y, y_hat):
665
+ y_d_rs = []
666
+ y_d_gs = []
667
+ fmap_rs = []
668
+ fmap_gs = []
669
+ for i, d in enumerate(self.discriminators):
670
+ y_d_r, fmap_r = d(y)
671
+ y_d_g, fmap_g = d(y_hat)
672
+ y_d_rs.append(y_d_r)
673
+ y_d_gs.append(y_d_g)
674
+ fmap_rs.append(fmap_r)
675
+ fmap_gs.append(fmap_g)
676
+
677
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
678
+
679
+
680
+ class ReferenceEncoder(nn.Module):
681
+ """
682
+ inputs --- [N, Ty/r, n_mels*r] mels
683
+ outputs --- [N, ref_enc_gru_size]
684
+ """
685
+
686
+ def __init__(self, spec_channels, gin_channels=0):
687
+ super().__init__()
688
+ self.spec_channels = spec_channels
689
+ ref_enc_filters = [32, 32, 64, 64, 128, 128]
690
+ K = len(ref_enc_filters)
691
+ filters = [1] + ref_enc_filters
692
+ convs = [
693
+ weight_norm(
694
+ nn.Conv2d(
695
+ in_channels=filters[i],
696
+ out_channels=filters[i + 1],
697
+ kernel_size=(3, 3),
698
+ stride=(2, 2),
699
+ padding=(1, 1),
700
+ )
701
+ )
702
+ for i in range(K)
703
+ ]
704
+ self.convs = nn.ModuleList(convs)
705
+ # self.wns = nn.ModuleList([weight_norm(num_features=ref_enc_filters[i]) for i in range(K)]) # noqa: E501
706
+
707
+ out_channels = self.calculate_channels(spec_channels, 3, 2, 1, K)
708
+ self.gru = nn.GRU(
709
+ input_size=ref_enc_filters[-1] * out_channels,
710
+ hidden_size=256 // 2,
711
+ batch_first=True,
712
+ )
713
+ self.proj = nn.Linear(128, gin_channels)
714
+
715
+ def forward(self, inputs, mask=None):
716
+ N = inputs.size(0)
717
+ out = inputs.view(N, 1, -1, self.spec_channels) # [N, 1, Ty, n_freqs]
718
+ for conv in self.convs:
719
+ out = conv(out)
720
+ # out = wn(out)
721
+ out = F.relu(out) # [N, 128, Ty//2^K, n_mels//2^K]
722
+
723
+ out = out.transpose(1, 2) # [N, Ty//2^K, 128, n_mels//2^K]
724
+ T = out.size(1)
725
+ N = out.size(0)
726
+ out = out.contiguous().view(N, T, -1) # [N, Ty//2^K, 128*n_mels//2^K]
727
+
728
+ self.gru.flatten_parameters()
729
+ memory, out = self.gru(out) # out --- [1, N, 128]
730
+
731
+ return self.proj(out.squeeze(0))
732
+
733
+ def calculate_channels(self, L, kernel_size, stride, pad, n_convs):
734
+ for i in range(n_convs):
735
+ L = (L - kernel_size + 2 * pad) // stride + 1
736
+ return L
737
+
738
+
739
+ class SynthesizerTrn(nn.Module):
740
+ """
741
+ Synthesizer for Training
742
+ """
743
+
744
+ def __init__(
745
+ self,
746
+ n_vocab,
747
+ spec_channels,
748
+ segment_size,
749
+ inter_channels,
750
+ hidden_channels,
751
+ filter_channels,
752
+ n_heads,
753
+ n_layers,
754
+ kernel_size,
755
+ p_dropout,
756
+ resblock,
757
+ resblock_kernel_sizes,
758
+ resblock_dilation_sizes,
759
+ upsample_rates,
760
+ upsample_initial_channel,
761
+ upsample_kernel_sizes,
762
+ n_speakers=256,
763
+ gin_channels=256,
764
+ use_sdp=True,
765
+ n_flow_layer=4,
766
+ n_layers_trans_flow=6,
767
+ flow_share_parameter=False,
768
+ use_transformer_flow=True,
769
+ **kwargs
770
+ ):
771
+ super().__init__()
772
+ self.n_vocab = n_vocab
773
+ self.spec_channels = spec_channels
774
+ self.inter_channels = inter_channels
775
+ self.hidden_channels = hidden_channels
776
+ self.filter_channels = filter_channels
777
+ self.n_heads = n_heads
778
+ self.n_layers = n_layers
779
+ self.kernel_size = kernel_size
780
+ self.p_dropout = p_dropout
781
+ self.resblock = resblock
782
+ self.resblock_kernel_sizes = resblock_kernel_sizes
783
+ self.resblock_dilation_sizes = resblock_dilation_sizes
784
+ self.upsample_rates = upsample_rates
785
+ self.upsample_initial_channel = upsample_initial_channel
786
+ self.upsample_kernel_sizes = upsample_kernel_sizes
787
+ self.segment_size = segment_size
788
+ self.n_speakers = n_speakers
789
+ self.gin_channels = gin_channels
790
+ self.n_layers_trans_flow = n_layers_trans_flow
791
+ self.use_spk_conditioned_encoder = kwargs.get(
792
+ "use_spk_conditioned_encoder", True
793
+ )
794
+ self.use_sdp = use_sdp
795
+ self.use_noise_scaled_mas = kwargs.get("use_noise_scaled_mas", False)
796
+ self.mas_noise_scale_initial = kwargs.get("mas_noise_scale_initial", 0.01)
797
+ self.noise_scale_delta = kwargs.get("noise_scale_delta", 2e-6)
798
+ self.current_mas_noise_scale = self.mas_noise_scale_initial
799
+ if self.use_spk_conditioned_encoder and gin_channels > 0:
800
+ self.enc_gin_channels = gin_channels
801
+ self.enc_p = TextEncoder(
802
+ n_vocab,
803
+ inter_channels,
804
+ hidden_channels,
805
+ filter_channels,
806
+ n_heads,
807
+ n_layers,
808
+ kernel_size,
809
+ p_dropout,
810
+ gin_channels=self.enc_gin_channels,
811
+ )
812
+ self.dec = Generator(
813
+ inter_channels,
814
+ resblock,
815
+ resblock_kernel_sizes,
816
+ resblock_dilation_sizes,
817
+ upsample_rates,
818
+ upsample_initial_channel,
819
+ upsample_kernel_sizes,
820
+ gin_channels=gin_channels,
821
+ )
822
+ self.enc_q = PosteriorEncoder(
823
+ spec_channels,
824
+ inter_channels,
825
+ hidden_channels,
826
+ 5,
827
+ 1,
828
+ 16,
829
+ gin_channels=gin_channels,
830
+ )
831
+ if use_transformer_flow:
832
+ self.flow = TransformerCouplingBlock(
833
+ inter_channels,
834
+ hidden_channels,
835
+ filter_channels,
836
+ n_heads,
837
+ n_layers_trans_flow,
838
+ 5,
839
+ p_dropout,
840
+ n_flow_layer,
841
+ gin_channels=gin_channels,
842
+ share_parameter=flow_share_parameter,
843
+ )
844
+ else:
845
+ self.flow = ResidualCouplingBlock(
846
+ inter_channels,
847
+ hidden_channels,
848
+ 5,
849
+ 1,
850
+ n_flow_layer,
851
+ gin_channels=gin_channels,
852
+ )
853
+ self.sdp = StochasticDurationPredictor(
854
+ hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels
855
+ )
856
+ self.dp = DurationPredictor(
857
+ hidden_channels, 256, 3, 0.5, gin_channels=gin_channels
858
+ )
859
+
860
+ if n_speakers > 1:
861
+ self.emb_g = nn.Embedding(n_speakers, gin_channels)
862
+ else:
863
+ self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)
864
+
865
+ def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert, ja_bert):
866
+        if self.n_speakers > 1:  # align with the n_speakers > 1 check in __init__
867
+ g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
868
+ else:
869
+ g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)
870
+ x, m_p, logs_p, x_mask = self.enc_p(
871
+ x, x_lengths, tone, language, bert, ja_bert, g=g
872
+ )
873
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
874
+ z_p = self.flow(z, y_mask, g=g)
875
+
876
+ with torch.no_grad():
877
+ # negative cross-entropy
878
+ s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
879
+ neg_cent1 = torch.sum(
880
+ -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True
881
+ ) # [b, 1, t_s]
882
+ neg_cent2 = torch.matmul(
883
+ -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r
884
+ ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
885
+ neg_cent3 = torch.matmul(
886
+ z_p.transpose(1, 2), (m_p * s_p_sq_r)
887
+ ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
888
+ neg_cent4 = torch.sum(
889
+ -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True
890
+ ) # [b, 1, t_s]
891
+ neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
892
+ if self.use_noise_scaled_mas:
893
+ epsilon = (
894
+ torch.std(neg_cent)
895
+ * torch.randn_like(neg_cent)
896
+ * self.current_mas_noise_scale
897
+ )
898
+ neg_cent = neg_cent + epsilon
899
+
900
+ attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
901
+ attn = (
902
+ monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))
903
+ .unsqueeze(1)
904
+ .detach()
905
+ )
906
+
907
+ w = attn.sum(2)
908
+
909
+ l_length_sdp = self.sdp(x, x_mask, w, g=g)
910
+ l_length_sdp = l_length_sdp / torch.sum(x_mask)
911
+
912
+ logw_ = torch.log(w + 1e-6) * x_mask
913
+ logw = self.dp(x, x_mask, g=g)
914
+ l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(
915
+ x_mask
916
+ ) # for averaging
917
+
918
+ l_length = l_length_dp + l_length_sdp
919
+
920
+ # expand prior
921
+ m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
922
+ logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
923
+
924
+ z_slice, ids_slice = commons.rand_slice_segments(
925
+ z, y_lengths, self.segment_size
926
+ )
927
+ o = self.dec(z_slice, g=g)
928
+ return (
929
+ o,
930
+ l_length,
931
+ attn,
932
+ ids_slice,
933
+ x_mask,
934
+ y_mask,
935
+ (z, z_p, m_p, logs_p, m_q, logs_q),
936
+ (x, logw, logw_),
937
+ )
938
+
939
+ def infer(
940
+ self,
941
+ x,
942
+ x_lengths,
943
+ sid,
944
+ tone,
945
+ language,
946
+ bert,
947
+ ja_bert,
948
+ noise_scale=0.667,
949
+ length_scale=1,
950
+ noise_scale_w=0.8,
951
+ max_len=None,
952
+ sdp_ratio=0,
953
+ y=None,
954
+ ):
955
+ # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)
956
+ # g = self.gst(y)
957
+        if self.n_speakers > 1:  # align with the n_speakers > 1 check in __init__
958
+ g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
959
+ else:
960
+ g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)
961
+ x, m_p, logs_p, x_mask = self.enc_p(
962
+ x, x_lengths, tone, language, bert, ja_bert, g=g
963
+ )
964
+ logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (
965
+ sdp_ratio
966
+ ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)
967
+ w = torch.exp(logw) * x_mask * length_scale
968
+ w_ceil = torch.ceil(w)
969
+ y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
970
+ y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(
971
+ x_mask.dtype
972
+ )
973
+ attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
974
+ attn = commons.generate_path(w_ceil, attn_mask)
975
+
976
+ m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(
977
+ 1, 2
978
+ ) # [b, t', t], [b, t, d] -> [b, d, t']
979
+ logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(
980
+ 1, 2
981
+ ) # [b, t', t], [b, t, d] -> [b, d, t']
982
+
983
+ z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
984
+ z = self.flow(z_p, y_mask, g=g, reverse=True)
985
+ o = self.dec((z * y_mask)[:, :, :max_len], g=g)
986
+ return o, attn, y_mask, (z, z_p, m_p, logs_p)
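To make the expected call signature concrete, a hedged sketch of constructing `SynthesizerTrn` with the hyperparameters from `models/config.json` and running `infer` on dummy inputs; shapes are the point here, and the token ids, tones, and BERT features are all zero placeholders:

```python
import torch

net_g = SynthesizerTrn(
    n_vocab=0,                       # unused: TextEncoder embeds len(symbols) directly
    spec_channels=1025,              # filter_length // 2 + 1
    segment_size=32,                 # segment_size // hop_length; unused by infer
    inter_channels=192,
    hidden_channels=192,
    filter_channels=768,
    n_heads=2,
    n_layers=6,
    kernel_size=3,
    p_dropout=0.1,
    resblock="1",
    resblock_kernel_sizes=[3, 7, 11],
    resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    upsample_rates=[8, 8, 2, 2, 2],  # product = 512 = hop_length
    upsample_initial_channel=512,
    upsample_kernel_sizes=[16, 16, 8, 2, 2],
    n_speakers=256,
    gin_channels=256,
).eval()

T = 37                                    # phoneme-sequence length (arbitrary)
x = torch.zeros(1, T, dtype=torch.long)   # phoneme ids (placeholder)
tone = torch.zeros(1, T, dtype=torch.long)
language = torch.zeros(1, T, dtype=torch.long)
bert = torch.zeros(1, 1024, T)            # Chinese BERT features
ja_bert = torch.zeros(1, 768, T)          # Japanese BERT/DeBERTa features
x_lengths = torch.LongTensor([T])
sid = torch.LongTensor([0])

with torch.no_grad():
    audio, attn, y_mask, _ = net_g.infer(x, x_lengths, sid, tone, language, bert, ja_bert)
print(audio.shape)  # [1, 1, frames * 512]
```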
models/DUR_90000.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d31c7a00f25b5089d20eaa210e62f8298a23cecccdbd8cf223950378dc21384
3
+ size 6891852
models/D_90000.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d625aa331c4c97eb08406a6982752fe1154d4bdb823b761100b02081867a6af
3
+ size 561098682
models/G_90000.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a1c8b15519019593b7c26487914447c4cba923d541a968057a7c5658b0026eda
3
+ size 857922750
models/config.json ADDED
@@ -0,0 +1,198 @@
1
+ {
2
+ "train": {
3
+ "log_interval": 20,
4
+ "eval_interval": 500,
5
+ "seed": 52,
6
+ "epochs": 10000,
7
+ "learning_rate": 1e-04,
8
+ "betas": [
9
+ 0.8,
10
+ 0.99
11
+ ],
12
+ "eps": 1e-09,
13
+ "batch_size": 1,
14
+ "fp16_run": true,
15
+ "lr_decay": 0.999875,
16
+ "segment_size": 16384,
17
+ "init_lr_ratio": 1,
18
+ "warmup_ratio": 0.1,
19
+ "clipping_grad_norm": 10,
20
+ "c_mel": 45,
21
+ "c_kl": 1.0,
22
+ "skip_optimizer": true
23
+ },
24
+ "data": {
25
+ "training_files": "filelists/train-val-1.list",
26
+ "validation_files": "filelists/val-1.list",
27
+ "max_wav_value": 32768.0,
28
+ "sampling_rate": 44100,
29
+ "filter_length": 2048,
30
+ "hop_length": 512,
31
+ "win_length": 2048,
32
+ "n_mel_channels": 128,
33
+ "mel_fmin": 0.0,
34
+ "mel_fmax": null,
35
+ "add_blank": true,
36
+ "n_speakers": 256,
37
+ "cleaned_text": true,
38
+ "spk2id": {
39
+ "特别周": 0,
40
+ "无声铃鹿": 1,
41
+ "丸善斯基": 2,
42
+ "富士奇迹": 3,
43
+ "东海帝皇": 4,
44
+ "小栗帽": 5,
45
+ "黄金船": 6,
46
+ "伏特加": 7,
47
+ "大和赤骥": 8,
48
+ "菱亚马逊": 9,
49
+ "草上飞": 10,
50
+ "大树快车": 11,
51
+ "目白麦昆": 12,
52
+ "神鹰": 13,
53
+ "鲁道夫象征": 14,
54
+ "好歌剧": 15,
55
+ "成田白仁": 16,
56
+ "爱丽数码": 17,
57
+ "美妙姿势": 18,
58
+ "摩耶重炮": 19,
59
+ "玉藻十字": 20,
60
+ "琵琶晨光": 21,
61
+ "目白赖恩": 22,
62
+ "美浦波旁": 23,
63
+ "雪中美人": 24,
64
+ "米浴": 25,
65
+ "爱丽速子": 26,
66
+ "爱慕织姬": 27,
67
+ "曼城茶座": 28,
68
+ "气槽": 29,
69
+ "星云天空": 30,
70
+ "菱曙": 31,
71
+ "艾尼斯风神": 32,
72
+ "稻荷一": 33,
73
+ "空中神宫": 34,
74
+ "川上公主": 35,
75
+ "黄金城": 36,
76
+ "真机伶": 37,
77
+ "荣进闪耀": 38,
78
+ "采珠": 39,
79
+ "新光风": 40,
80
+ "超级小海湾": 41,
81
+ "荒漠英雄": 42,
82
+ "东瀛佐敦": 43,
83
+ "中山庆典": 44,
84
+ "成田大进": 45,
85
+ "西野花": 46,
86
+ "醒目飞鹰": 47,
87
+ "春乌拉拉": 48,
88
+ "青竹回忆": 49,
89
+ "待兼福来": 50,
90
+ "Mr CB": 51,
91
+ "美丽周日": 52,
92
+ "名将怒涛": 53,
93
+ "帝王光辉": 54,
94
+ "待兼诗歌剧": 55,
95
+ "生野狄杜斯": 56,
96
+ "优秀素质": 57,
97
+ "双涡轮": 58,
98
+ "目白多伯": 59,
99
+ "目白善信": 60,
100
+ "大拓太阳神": 61,
101
+ "北部玄驹": 62,
102
+ "目白阿尔丹": 63,
103
+ "八重无敌": 64,
104
+ "里见光钻": 65,
105
+ "天狼星象征": 66,
106
+ "樱花桂冠": 67,
107
+ "成田路": 68,
108
+ "也文摄辉": 69,
109
+ "吉兆": 70,
110
+ "鹤丸刚志": 71,
111
+ "谷野美酒": 72,
112
+ "第一红宝石": 73,
113
+ "目白高峰": 74,
114
+ "真弓快车": 75,
115
+ "里见皇冠": 76,
116
+ "高尚骏逸": 77,
117
+ "凯斯奇迹": 78,
118
+ "森林宝穴": 79,
119
+ "小林力奇": 80,
120
+ "奇瑞骏": 81,
121
+ "葛城王牌": 82,
122
+ "新宇宙": 83,
123
+ "菱钻奇宝": 84,
124
+ "望族": 85,
125
+ "骏川手纲": 86,
126
+ "秋川弥生": 87,
127
+ "乙名史悦子": 88,
128
+ "桐生院葵": 89,
129
+ "安心泽刺刺美": 90,
130
+ "达利阿拉伯": 91,
131
+ "高多芬柏布": 92,
132
+ "佐岳五月": 93,
133
+ "胜利奖券": 94,
134
+ "樱花进王": 95,
135
+ "东商变革": 96,
136
+ "微光飞驹": 97,
137
+ "樱花千代王": 98,
138
+ "跳舞城": 99,
139
+ "樫本理子": 100,
140
+ "明亮圣辉": 101,
141
+ "拜耶土耳其": 102
142
+ }
143
+ },
144
+ "model": {
145
+ "use_spk_conditioned_encoder": true,
146
+ "use_noise_scaled_mas": true,
147
+ "use_mel_posterior_encoder": false,
148
+ "use_duration_discriminator": true,
149
+ "inter_channels": 192,
150
+ "hidden_channels": 192,
151
+ "filter_channels": 768,
152
+ "n_heads": 2,
153
+ "n_layers": 6,
154
+ "kernel_size": 3,
155
+ "p_dropout": 0.1,
156
+ "resblock": "1",
157
+ "resblock_kernel_sizes": [
158
+ 3,
159
+ 7,
160
+ 11
161
+ ],
162
+ "resblock_dilation_sizes": [
163
+ [
164
+ 1,
165
+ 3,
166
+ 5
167
+ ],
168
+ [
169
+ 1,
170
+ 3,
171
+ 5
172
+ ],
173
+ [
174
+ 1,
175
+ 3,
176
+ 5
177
+ ]
178
+ ],
179
+ "upsample_rates": [
180
+ 8,
181
+ 8,
182
+ 2,
183
+ 2,
184
+ 2
185
+ ],
186
+ "upsample_initial_channel": 512,
187
+ "upsample_kernel_sizes": [
188
+ 16,
189
+ 16,
190
+ 8,
191
+ 2,
192
+ 2
193
+ ],
194
+ "n_layers_q": 3,
195
+ "use_spectral_norm": false,
196
+ "gin_channels": 256
197
+ }
198
+ }
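A quick way to sanity-check this config, using only the standard library (the printed values just restate fields shown above):

```python
import json

with open("models/config.json") as f:
    cfg = json.load(f)

print(cfg["data"]["sampling_rate"])           # 44100
print(cfg["data"]["filter_length"] // 2 + 1)  # 1025 linear-spectrogram bins
print(len(cfg["data"]["spk2id"]))             # 103 speaker entries
print(cfg["train"]["clipping_grad_norm"])     # 10, the gradient clip noted in info.md
```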
modules.py ADDED
@@ -0,0 +1,597 @@
1
+ import math
2
+ import torch
3
+ from torch import nn
4
+ from torch.nn import functional as F
5
+
6
+ from torch.nn import Conv1d
7
+ from torch.nn.utils import weight_norm, remove_weight_norm
8
+
9
+ import commons
10
+ from commons import init_weights, get_padding
11
+ from transforms import piecewise_rational_quadratic_transform
12
+ from attentions import Encoder
13
+
14
+ LRELU_SLOPE = 0.1
15
+
16
+
17
+ class LayerNorm(nn.Module):
18
+ def __init__(self, channels, eps=1e-5):
19
+ super().__init__()
20
+ self.channels = channels
21
+ self.eps = eps
22
+
23
+ self.gamma = nn.Parameter(torch.ones(channels))
24
+ self.beta = nn.Parameter(torch.zeros(channels))
25
+
26
+ def forward(self, x):
27
+ x = x.transpose(1, -1)
28
+ x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
29
+ return x.transpose(1, -1)
30
+
31
+
32
+ class ConvReluNorm(nn.Module):
33
+ def __init__(
34
+ self,
35
+ in_channels,
36
+ hidden_channels,
37
+ out_channels,
38
+ kernel_size,
39
+ n_layers,
40
+ p_dropout,
41
+ ):
42
+ super().__init__()
43
+ self.in_channels = in_channels
44
+ self.hidden_channels = hidden_channels
45
+ self.out_channels = out_channels
46
+ self.kernel_size = kernel_size
47
+ self.n_layers = n_layers
48
+ self.p_dropout = p_dropout
49
+        assert n_layers > 1, "Number of layers should be larger than 1."
50
+
51
+ self.conv_layers = nn.ModuleList()
52
+ self.norm_layers = nn.ModuleList()
53
+ self.conv_layers.append(
54
+ nn.Conv1d(
55
+ in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
56
+ )
57
+ )
58
+ self.norm_layers.append(LayerNorm(hidden_channels))
59
+ self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
60
+ for _ in range(n_layers - 1):
61
+ self.conv_layers.append(
62
+ nn.Conv1d(
63
+ hidden_channels,
64
+ hidden_channels,
65
+ kernel_size,
66
+ padding=kernel_size // 2,
67
+ )
68
+ )
69
+ self.norm_layers.append(LayerNorm(hidden_channels))
70
+ self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
71
+ self.proj.weight.data.zero_()
72
+ self.proj.bias.data.zero_()
73
+
74
+ def forward(self, x, x_mask):
75
+ x_org = x
76
+ for i in range(self.n_layers):
77
+ x = self.conv_layers[i](x * x_mask)
78
+ x = self.norm_layers[i](x)
79
+ x = self.relu_drop(x)
80
+ x = x_org + self.proj(x)
81
+ return x * x_mask
82
+
83
+
84
+ class DDSConv(nn.Module):
85
+ """
86
+    Dilated and Depth-Separable Convolution
87
+ """
88
+
89
+ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
90
+ super().__init__()
91
+ self.channels = channels
92
+ self.kernel_size = kernel_size
93
+ self.n_layers = n_layers
94
+ self.p_dropout = p_dropout
95
+
96
+ self.drop = nn.Dropout(p_dropout)
97
+ self.convs_sep = nn.ModuleList()
98
+ self.convs_1x1 = nn.ModuleList()
99
+ self.norms_1 = nn.ModuleList()
100
+ self.norms_2 = nn.ModuleList()
101
+ for i in range(n_layers):
102
+ dilation = kernel_size**i
103
+ padding = (kernel_size * dilation - dilation) // 2
104
+ self.convs_sep.append(
105
+ nn.Conv1d(
106
+ channels,
107
+ channels,
108
+ kernel_size,
109
+ groups=channels,
110
+ dilation=dilation,
111
+ padding=padding,
112
+ )
113
+ )
114
+ self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
115
+ self.norms_1.append(LayerNorm(channels))
116
+ self.norms_2.append(LayerNorm(channels))
117
+
118
+ def forward(self, x, x_mask, g=None):
119
+ if g is not None:
120
+ x = x + g
121
+ for i in range(self.n_layers):
122
+ y = self.convs_sep[i](x * x_mask)
123
+ y = self.norms_1[i](y)
124
+ y = F.gelu(y)
125
+ y = self.convs_1x1[i](y)
126
+ y = self.norms_2[i](y)
127
+ y = F.gelu(y)
128
+ y = self.drop(y)
129
+ x = x + y
130
+ return x * x_mask
131
+
132
+
133
+ class WN(torch.nn.Module):
134
+ def __init__(
135
+ self,
136
+ hidden_channels,
137
+ kernel_size,
138
+ dilation_rate,
139
+ n_layers,
140
+ gin_channels=0,
141
+ p_dropout=0,
142
+ ):
143
+ super(WN, self).__init__()
144
+ assert kernel_size % 2 == 1
145
+ self.hidden_channels = hidden_channels
146
+        self.kernel_size = kernel_size  # store the int itself (was mistakenly wrapped in a 1-tuple)
147
+ self.dilation_rate = dilation_rate
148
+ self.n_layers = n_layers
149
+ self.gin_channels = gin_channels
150
+ self.p_dropout = p_dropout
151
+
152
+ self.in_layers = torch.nn.ModuleList()
153
+ self.res_skip_layers = torch.nn.ModuleList()
154
+ self.drop = nn.Dropout(p_dropout)
155
+
156
+ if gin_channels != 0:
157
+ cond_layer = torch.nn.Conv1d(
158
+ gin_channels, 2 * hidden_channels * n_layers, 1
159
+ )
160
+ self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
161
+
162
+ for i in range(n_layers):
163
+ dilation = dilation_rate**i
164
+ padding = int((kernel_size * dilation - dilation) / 2)
165
+ in_layer = torch.nn.Conv1d(
166
+ hidden_channels,
167
+ 2 * hidden_channels,
168
+ kernel_size,
169
+ dilation=dilation,
170
+ padding=padding,
171
+ )
172
+ in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
173
+ self.in_layers.append(in_layer)
174
+
175
+ # last one is not necessary
176
+ if i < n_layers - 1:
177
+ res_skip_channels = 2 * hidden_channels
178
+ else:
179
+ res_skip_channels = hidden_channels
180
+
181
+ res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
182
+ res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
183
+ self.res_skip_layers.append(res_skip_layer)
184
+
185
+ def forward(self, x, x_mask, g=None, **kwargs):
186
+ output = torch.zeros_like(x)
187
+ n_channels_tensor = torch.IntTensor([self.hidden_channels])
188
+
189
+ if g is not None:
190
+ g = self.cond_layer(g)
191
+
192
+ for i in range(self.n_layers):
193
+ x_in = self.in_layers[i](x)
194
+ if g is not None:
195
+ cond_offset = i * 2 * self.hidden_channels
196
+ g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
197
+ else:
198
+ g_l = torch.zeros_like(x_in)
199
+
200
+ acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
201
+ acts = self.drop(acts)
202
+
203
+ res_skip_acts = self.res_skip_layers[i](acts)
204
+ if i < self.n_layers - 1:
205
+ res_acts = res_skip_acts[:, : self.hidden_channels, :]
206
+ x = (x + res_acts) * x_mask
207
+ output = output + res_skip_acts[:, self.hidden_channels :, :]
208
+ else:
209
+ output = output + res_skip_acts
210
+ return output * x_mask
211
+
212
+ def remove_weight_norm(self):
213
+ if self.gin_channels != 0:
214
+ torch.nn.utils.remove_weight_norm(self.cond_layer)
215
+ for l in self.in_layers:
216
+ torch.nn.utils.remove_weight_norm(l)
217
+ for l in self.res_skip_layers:
218
+ torch.nn.utils.remove_weight_norm(l)
219
+
220
+
221
+ class ResBlock1(torch.nn.Module):
222
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
223
+ super(ResBlock1, self).__init__()
224
+ self.convs1 = nn.ModuleList(
225
+ [
226
+ weight_norm(
227
+ Conv1d(
228
+ channels,
229
+ channels,
230
+ kernel_size,
231
+ 1,
232
+ dilation=dilation[0],
233
+ padding=get_padding(kernel_size, dilation[0]),
234
+ )
235
+ ),
236
+ weight_norm(
237
+ Conv1d(
238
+ channels,
239
+ channels,
240
+ kernel_size,
241
+ 1,
242
+ dilation=dilation[1],
243
+ padding=get_padding(kernel_size, dilation[1]),
244
+ )
245
+ ),
246
+ weight_norm(
247
+ Conv1d(
248
+ channels,
249
+ channels,
250
+ kernel_size,
251
+ 1,
252
+ dilation=dilation[2],
253
+ padding=get_padding(kernel_size, dilation[2]),
254
+ )
255
+ ),
256
+ ]
257
+ )
258
+ self.convs1.apply(init_weights)
259
+
260
+ self.convs2 = nn.ModuleList(
261
+ [
262
+ weight_norm(
263
+ Conv1d(
264
+ channels,
265
+ channels,
266
+ kernel_size,
267
+ 1,
268
+ dilation=1,
269
+ padding=get_padding(kernel_size, 1),
270
+ )
271
+ ),
272
+ weight_norm(
273
+ Conv1d(
274
+ channels,
275
+ channels,
276
+ kernel_size,
277
+ 1,
278
+ dilation=1,
279
+ padding=get_padding(kernel_size, 1),
280
+ )
281
+ ),
282
+ weight_norm(
283
+ Conv1d(
284
+ channels,
285
+ channels,
286
+ kernel_size,
287
+ 1,
288
+ dilation=1,
289
+ padding=get_padding(kernel_size, 1),
290
+ )
291
+ ),
292
+ ]
293
+ )
294
+ self.convs2.apply(init_weights)
295
+
296
+ def forward(self, x, x_mask=None):
297
+ for c1, c2 in zip(self.convs1, self.convs2):
298
+ xt = F.leaky_relu(x, LRELU_SLOPE)
299
+ if x_mask is not None:
300
+ xt = xt * x_mask
301
+ xt = c1(xt)
302
+ xt = F.leaky_relu(xt, LRELU_SLOPE)
303
+ if x_mask is not None:
304
+ xt = xt * x_mask
305
+ xt = c2(xt)
306
+ x = xt + x
307
+ if x_mask is not None:
308
+ x = x * x_mask
309
+ return x
310
+
311
+ def remove_weight_norm(self):
312
+ for l in self.convs1:
313
+ remove_weight_norm(l)
314
+ for l in self.convs2:
315
+ remove_weight_norm(l)
316
+
317
+
318
+ class ResBlock2(torch.nn.Module):
319
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
320
+ super(ResBlock2, self).__init__()
321
+ self.convs = nn.ModuleList(
322
+ [
323
+ weight_norm(
324
+ Conv1d(
325
+ channels,
326
+                         channels,
+                         kernel_size,
+                         1,
+                         dilation=dilation[0],
+                         padding=get_padding(kernel_size, dilation[0]),
+                     )
+                 ),
+                 weight_norm(
+                     Conv1d(
+                         channels,
+                         channels,
+                         kernel_size,
+                         1,
+                         dilation=dilation[1],
+                         padding=get_padding(kernel_size, dilation[1]),
+                     )
+                 ),
+             ]
+         )
+         self.convs.apply(init_weights)
+
+     def forward(self, x, x_mask=None):
+         for c in self.convs:
+             xt = F.leaky_relu(x, LRELU_SLOPE)
+             if x_mask is not None:
+                 xt = xt * x_mask
+             xt = c(xt)
+             x = xt + x
+         if x_mask is not None:
+             x = x * x_mask
+         return x
+
+     def remove_weight_norm(self):
+         for l in self.convs:
+             remove_weight_norm(l)
+
+
+ class Log(nn.Module):
+     def forward(self, x, x_mask, reverse=False, **kwargs):
+         if not reverse:
+             y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
+             logdet = torch.sum(-y, [1, 2])
+             return y, logdet
+         else:
+             x = torch.exp(x) * x_mask
+             return x
+
+
+ class Flip(nn.Module):
+     def forward(self, x, *args, reverse=False, **kwargs):
+         x = torch.flip(x, [1])
+         if not reverse:
+             logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
+             return x, logdet
+         else:
+             return x
+
+
+ class ElementwiseAffine(nn.Module):
+     def __init__(self, channels):
+         super().__init__()
+         self.channels = channels
+         self.m = nn.Parameter(torch.zeros(channels, 1))
+         self.logs = nn.Parameter(torch.zeros(channels, 1))
+
+     def forward(self, x, x_mask, reverse=False, **kwargs):
+         if not reverse:
+             y = self.m + torch.exp(self.logs) * x
+             y = y * x_mask
+             logdet = torch.sum(self.logs * x_mask, [1, 2])
+             return y, logdet
+         else:
+             x = (x - self.m) * torch.exp(-self.logs) * x_mask
+             return x
+
+
+ class ResidualCouplingLayer(nn.Module):
+     def __init__(
+         self,
+         channels,
+         hidden_channels,
+         kernel_size,
+         dilation_rate,
+         n_layers,
+         p_dropout=0,
+         gin_channels=0,
+         mean_only=False,
+     ):
+         assert channels % 2 == 0, "channels should be divisible by 2"
+         super().__init__()
+         self.channels = channels
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.dilation_rate = dilation_rate
+         self.n_layers = n_layers
+         self.half_channels = channels // 2
+         self.mean_only = mean_only
+
+         self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
+         self.enc = WN(
+             hidden_channels,
+             kernel_size,
+             dilation_rate,
+             n_layers,
+             p_dropout=p_dropout,
+             gin_channels=gin_channels,
+         )
+         self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
+         self.post.weight.data.zero_()
+         self.post.bias.data.zero_()
+
+     def forward(self, x, x_mask, g=None, reverse=False):
+         x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+         h = self.pre(x0) * x_mask
+         h = self.enc(h, x_mask, g=g)
+         stats = self.post(h) * x_mask
+         if not self.mean_only:
+             m, logs = torch.split(stats, [self.half_channels] * 2, 1)
+         else:
+             m = stats
+             logs = torch.zeros_like(m)
+
+         if not reverse:
+             x1 = m + x1 * torch.exp(logs) * x_mask
+             x = torch.cat([x0, x1], 1)
+             logdet = torch.sum(logs, [1, 2])
+             return x, logdet
+         else:
+             x1 = (x1 - m) * torch.exp(-logs) * x_mask
+             x = torch.cat([x0, x1], 1)
+             return x
+
+
+ class ConvFlow(nn.Module):
+     def __init__(
+         self,
+         in_channels,
+         filter_channels,
+         kernel_size,
+         n_layers,
+         num_bins=10,
+         tail_bound=5.0,
+     ):
+         super().__init__()
+         self.in_channels = in_channels
+         self.filter_channels = filter_channels
+         self.kernel_size = kernel_size
+         self.n_layers = n_layers
+         self.num_bins = num_bins
+         self.tail_bound = tail_bound
+         self.half_channels = in_channels // 2
+
+         self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
+         self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
+         self.proj = nn.Conv1d(
+             filter_channels, self.half_channels * (num_bins * 3 - 1), 1
+         )
+         self.proj.weight.data.zero_()
+         self.proj.bias.data.zero_()
+
+     def forward(self, x, x_mask, g=None, reverse=False):
+         x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+         h = self.pre(x0)
+         h = self.convs(h, x_mask, g=g)
+         h = self.proj(h) * x_mask
+
+         b, c, t = x0.shape
+         # [b, c * (num_bins * 3 - 1), t] -> [b, c, t, num_bins * 3 - 1]
+         h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)
+
+         unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
+         unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
+             self.filter_channels
+         )
+         unnormalized_derivatives = h[..., 2 * self.num_bins :]
+
+         x1, logabsdet = piecewise_rational_quadratic_transform(
+             x1,
+             unnormalized_widths,
+             unnormalized_heights,
+             unnormalized_derivatives,
+             inverse=reverse,
+             tails="linear",
+             tail_bound=self.tail_bound,
+         )
+
+         x = torch.cat([x0, x1], 1) * x_mask
+         logdet = torch.sum(logabsdet * x_mask, [1, 2])
+         if not reverse:
+             return x, logdet
+         else:
+             return x
+
+
+ class TransformerCouplingLayer(nn.Module):
+     def __init__(
+         self,
+         channels,
+         hidden_channels,
+         kernel_size,
+         n_layers,
+         n_heads,
+         p_dropout=0,
+         filter_channels=0,
+         mean_only=False,
+         wn_sharing_parameter=None,
+         gin_channels=0,
+     ):
+         assert channels % 2 == 0, "channels should be divisible by 2"
+         super().__init__()
+         self.channels = channels
+         self.hidden_channels = hidden_channels
+         self.kernel_size = kernel_size
+         self.n_layers = n_layers
+         self.half_channels = channels // 2
+         self.mean_only = mean_only
+
+         self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
+         self.enc = (
+             Encoder(
+                 hidden_channels,
+                 filter_channels,
+                 n_heads,
+                 n_layers,
+                 kernel_size,
+                 p_dropout,
+                 isflow=True,
+                 gin_channels=gin_channels,
+             )
+             if wn_sharing_parameter is None
+             else wn_sharing_parameter
+         )
+         self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
+         self.post.weight.data.zero_()
+         self.post.bias.data.zero_()
+
+     def forward(self, x, x_mask, g=None, reverse=False):
+         x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+         h = self.pre(x0) * x_mask
+         h = self.enc(h, x_mask, g=g)
+         stats = self.post(h) * x_mask
+         if not self.mean_only:
+             m, logs = torch.split(stats, [self.half_channels] * 2, 1)
+         else:
+             m = stats
+             logs = torch.zeros_like(m)
+
+         if not reverse:
+             x1 = m + x1 * torch.exp(logs) * x_mask
+             x = torch.cat([x0, x1], 1)
+             logdet = torch.sum(logs, [1, 2])
+             return x, logdet
+         else:
+             x1 = (x1 - m) * torch.exp(-logs) * x_mask
+             x = torch.cat([x0, x1], 1)
+             return x
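Each of these flow modules is invertible by construction: the forward direction returns the transformed tensor together with its log-determinant, and `reverse=True` undoes the transform. A minimal round-trip sketch for `ResidualCouplingLayer`, assuming this `modules.py` is importable; the channel and layer sizes below are toy values, not the model's configuration:

```python
# Minimal invertibility check, assuming modules.py from this repo is on the path.
# All sizes here are illustrative toy values.
import torch
from modules import ResidualCouplingLayer

layer = ResidualCouplingLayer(
    channels=4, hidden_channels=8, kernel_size=5,
    dilation_rate=1, n_layers=2, mean_only=True,
)
x = torch.randn(1, 4, 10)        # [batch, channels, time]
x_mask = torch.ones(1, 1, 10)
y, logdet = layer(x, x_mask)             # forward: y = f(x), plus log|det J|
x_rec = layer(y, x_mask, reverse=True)   # reverse: recovers x
print(torch.allclose(x, x_rec, atol=1e-5))  # True up to float error
```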
monotonic_align/__init__.py ADDED
@@ -0,0 +1,16 @@
+ from numpy import zeros, int32, float32
+ from torch import from_numpy
+
+ from .core import maximum_path_jit
+
+
+ def maximum_path(neg_cent, mask):
+     """Monotonic alignment search: returns the best 0/1 alignment path
+     for a batch of negative-center (log-likelihood) matrices."""
+     device = neg_cent.device
+     dtype = neg_cent.dtype
+     neg_cent = neg_cent.data.cpu().numpy().astype(float32)
+     path = zeros(neg_cent.shape, dtype=int32)
+
+     t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32)
+     t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32)
+     maximum_path_jit(path, neg_cent, t_t_max, t_s_max)
+     return from_numpy(path).to(device=device, dtype=dtype)
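During training this is what turns soft text-to-spectrogram likelihoods into a hard monotonic alignment. A hedged toy call; the sizes are made up, and in the training code the last two dimensions are spectrogram frames and text tokens, with `mask` the product of the two validity masks:

```python
# Hedged toy example of maximum_path; all shapes here are invented.
import torch
from monotonic_align import maximum_path

neg_cent = torch.randn(1, 6, 3)   # [batch, T_frames, T_tokens] log-likelihoods
mask = torch.ones(1, 6, 3)        # every position valid
path = maximum_path(neg_cent, mask)
print(path[0])  # one 1 per frame row, advancing monotonically across tokens
```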
monotonic_align/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (726 Bytes)
monotonic_align/__pycache__/core.cpython-39.pyc ADDED
Binary file (981 Bytes)
monotonic_align/core.py ADDED
@@ -0,0 +1,46 @@
+ import numba
+
+
+ @numba.jit(
+     numba.void(
+         numba.int32[:, :, ::1],
+         numba.float32[:, :, ::1],
+         numba.int32[::1],
+         numba.int32[::1],
+     ),
+     nopython=True,
+     nogil=True,
+ )
+ def maximum_path_jit(paths, values, t_ys, t_xs):
+     b = paths.shape[0]
+     max_neg_val = -1e9
+     for i in range(int(b)):
+         path = paths[i]
+         value = values[i]
+         t_y = t_ys[i]
+         t_x = t_xs[i]
+
+         v_prev = v_cur = 0.0
+         index = t_x - 1
+
+         # Forward pass: accumulate the best monotonic-path score into value[y, x].
+         for y in range(t_y):
+             for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
+                 if x == y:
+                     v_cur = max_neg_val
+                 else:
+                     v_cur = value[y - 1, x]
+                 if x == 0:
+                     if y == 0:
+                         v_prev = 0.0
+                     else:
+                         v_prev = max_neg_val
+                 else:
+                     v_prev = value[y - 1, x - 1]
+                 value[y, x] += max(v_prev, v_cur)
+
+         # Backtracking: walk from the last frame back, marking the chosen cells.
+         for y in range(t_y - 1, -1, -1):
+             path[y, index] = 1
+             if index != 0 and (
+                 index == y or value[y - 1, index] < value[y - 1, index - 1]
+             ):
+                 index = index - 1
preprocess_text.py ADDED
@@ -0,0 +1,120 @@
+ import json
+ import os.path
+ from collections import defaultdict
+ from random import shuffle
+ from typing import Optional
+
+ from tqdm import tqdm
+ import click
+ from text.cleaner import clean_text
+
+
+ @click.command()
+ @click.option(
+     "--transcription-path",
+     default="filelists/output_fixed.txt",
+     type=click.Path(exists=True, file_okay=True, dir_okay=False),
+ )
+ @click.option("--cleaned-path", default=None)
+ @click.option("--train-path", default="filelists/train.list")
+ @click.option("--val-path", default="filelists/val.list")
+ @click.option(
+     "--config-path",
+     default="configs/config.json",
+     type=click.Path(exists=True, file_okay=True, dir_okay=False),
+ )
+ @click.option("--val-per-spk", default=1)
+ @click.option("--max-val-total", default=20)
+ @click.option("--clean/--no-clean", default=True)
+ def main(
+     transcription_path: str,
+     cleaned_path: Optional[str],
+     train_path: str,
+     val_path: str,
+     config_path: str,
+     val_per_spk: int,
+     max_val_total: int,
+     clean: bool,
+ ):
+     if cleaned_path is None:
+         cleaned_path = transcription_path + ".cleaned"
+
+     if clean:
+         out_file = open(cleaned_path, "w", encoding="utf-8")
+         for line in tqdm(open(transcription_path, encoding="utf-8").readlines()):
+             try:
+                 utt, spk, language, text = line.strip().split("|")
+                 norm_text, phones, tones, word2ph = clean_text(text, language)
+                 out_file.write(
+                     "{}|{}|{}|{}|{}|{}|{}\n".format(
+                         utt,
+                         spk,
+                         language,
+                         norm_text,
+                         " ".join(phones),
+                         " ".join([str(i) for i in tones]),
+                         " ".join([str(i) for i in word2ph]),
+                     )
+                 )
+             except Exception as error:
+                 print("err!", line, error)
+
+         out_file.close()
+
+         transcription_path = cleaned_path
+
+     spk_utt_map = defaultdict(list)
+     spk_id_map = {}
+     current_sid = 0
+
+     with open(transcription_path, encoding="utf-8") as f:
+         audioPaths = set()
+         countSame = 0
+         countNotFound = 0
+         for line in f.readlines():
+             utt, spk, language, text, phones, tones, word2ph = line.strip().split("|")
+             if utt in audioPaths:
+                 # Filter dataset errors: the same audio mapped to several texts
+                 # breaks the later BERT feature step.
+                 print(f"Duplicate audio/text pair: {line}")
+                 countSame += 1
+                 continue
+             if not os.path.isfile("filelists/" + utt):
+                 print(f"Audio file not found: {utt}")
+                 countNotFound += 1
+                 continue
+             audioPaths.add(utt)
+             spk_utt_map[spk].append(line)
+
+             if spk not in spk_id_map.keys():
+                 spk_id_map[spk] = current_sid
+                 current_sid += 1
+     print(f"Total duplicate audios: {countSame}, total missing audios: {countNotFound}")
+
+     train_list = []
+     val_list = []
+
+     for spk, utts in spk_utt_map.items():
+         shuffle(utts)
+         val_list += utts[:val_per_spk]
+         train_list += utts[val_per_spk:]
+
+     if len(val_list) > max_val_total:
+         train_list += val_list[max_val_total:]
+         val_list = val_list[:max_val_total]
+
+     with open(train_path, "w", encoding="utf-8") as f:
+         for line in train_list:
+             f.write(line)
+
+     with open(val_path, "w", encoding="utf-8") as f:
+         for line in val_list:
+             f.write(line)
+
+     config = json.load(open(config_path, encoding="utf-8"))
+     config["data"]["spk2id"] = spk_id_map
+     with open(config_path, "w", encoding="utf-8") as f:
+         json.dump(config, f, indent=2, ensure_ascii=False)
+
+
+ if __name__ == "__main__":
+     main()
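The input filelist is `|`-separated with four fields (`path|speaker|language|text`); after cleaning, each line carries seven fields with phones, tones, and word2ph appended. A hedged sketch of the per-line transformation; the audio path and sentence below are made up:

```python
# Hedged illustration of what the cleaning loop writes per line;
# the path and text are examples only.
from text.cleaner import clean_text

line = "spk1/001.wav|spk1|ZH|你好世界"
utt, spk, language, text = line.strip().split("|")
norm_text, phones, tones, word2ph = clean_text(text, language)
cleaned = "|".join(
    [utt, spk, language, norm_text,
     " ".join(phones),
     " ".join(str(i) for i in tones),
     " ".join(str(i) for i in word2ph)]
)
print(cleaned)  # the 7-field format train.list / val.list are built from
```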
requirements.txt ADDED
@@ -0,0 +1,23 @@
+ librosa==0.9.1
+ matplotlib
+ numpy
+ numba
+ phonemizer
+ scipy
+ tensorboard
+ torch
+ torchvision
+ Unidecode
+ amfm_decompy
+ jieba
+ transformers
+ pypinyin
+ cn2an
+ gradio
+ av
+ mecab-python3
+ loguru
+ unidic-lite
+ cmudict
+ fugashi
+ num2words
resample.py ADDED
@@ -0,0 +1,48 @@
+ import os
+ import argparse
+ import librosa
+ from multiprocessing import Pool, cpu_count
+
+ import soundfile
+ from tqdm import tqdm
+
+
+ def process(item):
+     spkdir, wav_name, args = item
+     speaker = spkdir.replace("\\", "/").split("/")[-1]
+     wav_path = os.path.join(args.in_dir, speaker, wav_name)
+     if os.path.exists(wav_path) and ".wav" in wav_path:
+         os.makedirs(os.path.join(args.out_dir, speaker), exist_ok=True)
+         wav, sr = librosa.load(wav_path, sr=args.sr)
+         soundfile.write(os.path.join(args.out_dir, speaker, wav_name), wav, sr)
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--sr", type=int, default=44100, help="sampling rate")
+     parser.add_argument(
+         "--in_dir", type=str, default="./raw", help="path to source dir"
+     )
+     parser.add_argument(
+         "--out_dir", type=str, default="./dataset", help="path to target dir"
+     )
+     args = parser.parse_args()
+     # processes = 8
+     processes = cpu_count() - 2 if cpu_count() > 4 else 1
+     pool = Pool(processes=processes)
+
+     for speaker in os.listdir(args.in_dir):
+         spk_dir = os.path.join(args.in_dir, speaker)
+         if os.path.isdir(spk_dir):
+             print(spk_dir)
+             for _ in tqdm(
+                 pool.imap_unordered(
+                     process,
+                     [
+                         (spk_dir, i, args)
+                         for i in os.listdir(spk_dir)
+                         if i.endswith("wav")
+                     ],
+                 )
+             ):
+                 pass
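Each worker simply round-trips the audio through librosa at the target rate, since `librosa.load` resamples on load. A single-file sketch of what `process()` does; the paths are examples only:

```python
# Hedged single-file equivalent of process(); the paths are illustrative.
import librosa
import soundfile

wav, sr = librosa.load("raw/spk1/001.wav", sr=44100)  # librosa resamples on load
soundfile.write("dataset/spk1/001.wav", wav, sr)
```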
server.py ADDED
@@ -0,0 +1,170 @@
+ from flask import Flask, request, Response
+ from io import BytesIO
+ import torch
+ from av import open as avopen
+
+ import commons
+ import utils
+ from models import SynthesizerTrn
+ from text.symbols import symbols
+ from text import cleaned_text_to_sequence, get_bert
+ from text.cleaner import clean_text
+ from scipy.io import wavfile
+
+ # Flask Init
+ app = Flask(__name__)
+ app.config["JSON_AS_ASCII"] = False
+
+
+ def get_text(text, language_str, hps):
+     # Language codes must match text.cleaner.language_module_map: "ZH" or "JP".
+     norm_text, phone, tone, word2ph = clean_text(text, language_str)
+     phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
+
+     if hps.data.add_blank:
+         phone = commons.intersperse(phone, 0)
+         tone = commons.intersperse(tone, 0)
+         language = commons.intersperse(language, 0)
+         for i in range(len(word2ph)):
+             word2ph[i] = word2ph[i] * 2
+         word2ph[0] += 1
+     bert = get_bert(norm_text, word2ph, language_str)
+     del word2ph
+     assert bert.shape[-1] == len(phone), phone
+
+     if language_str == "ZH":
+         ja_bert = torch.zeros(768, len(phone))
+     elif language_str == "JP":
+         ja_bert = bert
+         bert = torch.zeros(1024, len(phone))
+     else:
+         bert = torch.zeros(1024, len(phone))
+         ja_bert = torch.zeros(768, len(phone))
+     assert bert.shape[-1] == len(
+         phone
+     ), f"Bert seq len {bert.shape[-1]} != {len(phone)}"
+     phone = torch.LongTensor(phone)
+     tone = torch.LongTensor(tone)
+     language = torch.LongTensor(language)
+     return bert, ja_bert, phone, tone, language
+
+
+ def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid, language):
+     bert, ja_bert, phones, tones, lang_ids = get_text(text, language, hps)
+     with torch.no_grad():
+         x_tst = phones.to(dev).unsqueeze(0)
+         tones = tones.to(dev).unsqueeze(0)
+         lang_ids = lang_ids.to(dev).unsqueeze(0)
+         bert = bert.to(dev).unsqueeze(0)
+         ja_bert = ja_bert.to(dev).unsqueeze(0)
+         x_tst_lengths = torch.LongTensor([phones.size(0)]).to(dev)
+         speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(dev)
+         audio = (
+             net_g.infer(
+                 x_tst,
+                 x_tst_lengths,
+                 speakers,
+                 tones,
+                 lang_ids,
+                 bert,
+                 ja_bert,
+                 sdp_ratio=sdp_ratio,
+                 noise_scale=noise_scale,
+                 noise_scale_w=noise_scale_w,
+                 length_scale=length_scale,
+             )[0][0, 0]
+             .data.cpu()
+             .float()
+             .numpy()
+         )
+     return audio
+
+
+ def replace_punctuation(text, i=2):
+     punctuation = ",。?!"
+     for char in punctuation:
+         text = text.replace(char, char * i)
+     return text
+
+
+ def wav2(i, o, format):
+     inp = avopen(i, "rb")
+     out = avopen(o, "wb", format=format)
+     if format == "ogg":
+         format = "libvorbis"
+
+     ostream = out.add_stream(format)
+
+     for frame in inp.decode(audio=0):
+         for p in ostream.encode(frame):
+             out.mux(p)
+
+     for p in ostream.encode(None):
+         out.mux(p)
+
+     out.close()
+     inp.close()
+
+
+ # Load Generator
+ hps = utils.get_hparams_from_file("./configs/config.json")
+
+ dev = "cuda"
+ net_g = SynthesizerTrn(
+     len(symbols),
+     hps.data.filter_length // 2 + 1,
+     hps.train.segment_size // hps.data.hop_length,
+     n_speakers=hps.data.n_speakers,
+     **hps.model,
+ ).to(dev)
+ _ = net_g.eval()
+
+ _ = utils.load_checkpoint("logs/G_649000.pth", net_g, None, skip_optimizer=True)
+
+
+ @app.route("/")
+ def main():
+     try:
+         speaker = request.args.get("speaker")
+         text = request.args.get("text").replace("/n", "")
+         sdp_ratio = float(request.args.get("sdp_ratio", 0.2))
+         noise = float(request.args.get("noise", 0.5))
+         noisew = float(request.args.get("noisew", 0.6))
+         length = float(request.args.get("length", 1.2))
+         language = request.args.get("language")
+         if length >= 2:
+             return "Too big length"
+         if len(text) >= 250:
+             return "Too long text"
+         fmt = request.args.get("format", "wav")
+         if None in (speaker, text):
+             return "Missing Parameter"
+         if fmt not in ("mp3", "wav", "ogg"):
+             return "Invalid Format"
+         if language not in ("JP", "ZH"):
+             return "Invalid language"
+     except Exception:
+         return "Invalid Parameter"
+
+     with torch.no_grad():
+         audio = infer(
+             text,
+             sdp_ratio=sdp_ratio,
+             noise_scale=noise,
+             noise_scale_w=noisew,
+             length_scale=length,
+             sid=speaker,
+             language=language,
+         )
+
+     with BytesIO() as wav:
+         wavfile.write(wav, hps.data.sampling_rate, audio)
+         torch.cuda.empty_cache()
+         if fmt == "wav":
+             return Response(wav.getvalue(), mimetype="audio/wav")
+         wav.seek(0, 0)
+         with BytesIO() as ofp:
+             wav2(wav, ofp, fmt)
+             return Response(
+                 ofp.getvalue(), mimetype="audio/mpeg" if fmt == "mp3" else "audio/ogg"
+             )
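The endpoint is a plain GET with query parameters. A hedged client sketch: it assumes the server is started with `flask run` on the default port 5000, that the `requests` package is installed (it is not in requirements.txt), and that `spk1` exists in `hps.data.spk2id`; none of these are pinned by this repo:

```python
# Hypothetical client for the endpoint above; host, port, and speaker
# name are assumptions, not part of this repo.
import requests

resp = requests.get(
    "http://127.0.0.1:5000/",
    params={
        "speaker": "spk1",      # must be a key of hps.data.spk2id
        "text": "你好",
        "language": "ZH",       # "ZH" or "JP"
        "sdp_ratio": 0.2,
        "noise": 0.5,
        "noisew": 0.6,
        "length": 1.2,
        "format": "wav",        # "wav", "mp3", or "ogg"
    },
)
with open("out.wav", "wb") as f:
    f.write(resp.content)
```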
text/__init__.py ADDED
@@ -0,0 +1,28 @@
+ from text.symbols import *
+
+ _symbol_to_id = {s: i for i, s in enumerate(symbols)}
+
+
+ def cleaned_text_to_sequence(cleaned_text, tones, language):
+     """Converts cleaned phoneme symbols to ID sequences.
+     Args:
+         cleaned_text: list of phoneme symbols produced by a cleaner
+         tones: per-phone tone indices
+         language: language code used for the tone offset and language IDs
+     Returns:
+         Parallel lists of phone IDs, offset tone IDs, and language IDs
+     """
+     phones = [_symbol_to_id[symbol] for symbol in cleaned_text]
+     tone_start = language_tone_start_map[language]
+     tones = [i + tone_start for i in tones]
+     lang_id = language_id_map[language]
+     lang_ids = [lang_id for i in phones]
+     return phones, tones, lang_ids
+
+
+ def get_bert(norm_text, word2ph, language, device="cuda"):
+     from .chinese_bert import get_bert_feature as zh_bert
+     from .english_bert_mock import get_bert_feature as en_bert
+     from .japanese_bert import get_bert_feature as jp_bert
+
+     lang_bert_func_map = {"ZH": zh_bert, "EN": en_bert, "JP": jp_bert}
+     bert = lang_bert_func_map[language](norm_text, word2ph, device)
+     return bert
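`cleaned_text_to_sequence` is pure table lookup: symbol to ID, tone plus a per-language offset, and one constant language ID per phone. A hedged example; it assumes `"_"` (the pad symbol) and `"a"` exist in `text.symbols` for this repo:

```python
# Hedged example; the phoneme strings are assumed to be valid symbols.
from text import cleaned_text_to_sequence

phones, tones, lang_ids = cleaned_text_to_sequence(["_", "a", "_"], [0, 1, 0], "ZH")
print(phones, tones, lang_ids)  # three parallel integer lists of equal length
```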
text/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (1.57 kB)
text/__pycache__/chinese.cpython-39.pyc ADDED
Binary file (4.48 kB)
text/__pycache__/chinese_bert.cpython-39.pyc ADDED
Binary file (1.68 kB)
text/__pycache__/cleaner.cpython-39.pyc ADDED
Binary file (946 Bytes)
text/__pycache__/english_bert_mock.cpython-39.pyc ADDED
Binary file (312 Bytes)
text/__pycache__/japanese.cpython-39.pyc ADDED
Binary file (14.6 kB)
text/__pycache__/japanese_bert.cpython-39.pyc ADDED
Binary file (1.99 kB)
text/__pycache__/symbols.cpython-39.pyc ADDED
Binary file (1.47 kB)
text/__pycache__/tone_sandhi.cpython-39.pyc ADDED
Binary file (13.5 kB)
text/chinese.py ADDED
@@ -0,0 +1,198 @@
+ import os
+ import re
+
+ import cn2an
+ from pypinyin import lazy_pinyin, Style
+
+ from text.symbols import punctuation
+ from text.tone_sandhi import ToneSandhi
+
+ current_file_path = os.path.dirname(__file__)
+ pinyin_to_symbol_map = {
+     line.split("\t")[0]: line.strip().split("\t")[1]
+     for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
+ }
+
+ import jieba.posseg as psg
+
+
+ rep_map = {
+     ":": ",",
+     ";": ",",
+     ",": ",",
+     "。": ".",
+     "!": "!",
+     "?": "?",
+     "\n": ".",
+     "·": ",",
+     "、": ",",
+     "...": "…",
+     "$": ".",
+     "“": "'",
+     "”": "'",
+     "‘": "'",
+     "’": "'",
+     "(": "'",
+     ")": "'",
+     "(": "'",
+     ")": "'",
+     "《": "'",
+     "》": "'",
+     "【": "'",
+     "】": "'",
+     "[": "'",
+     "]": "'",
+     "—": "-",
+     "~": "-",
+     "~": "-",
+     "「": "'",
+     "」": "'",
+ }
+
+ tone_modifier = ToneSandhi()
+
+
+ def replace_punctuation(text):
+     text = text.replace("嗯", "恩").replace("呣", "母")
+     pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys()))
+
+     replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
+
+     replaced_text = re.sub(
+         r"[^\u4e00-\u9fa5" + "".join(punctuation) + r"]+", "", replaced_text
+     )
+
+     return replaced_text
+
+
+ def g2p(text):
+     pattern = r"(?<=[{0}])\s*".format("".join(punctuation))
+     sentences = [i for i in re.split(pattern, text) if i.strip() != ""]
+     phones, tones, word2ph = _g2p(sentences)
+     assert sum(word2ph) == len(phones)
+     assert len(word2ph) == len(text)  # this occasionally fails; wrap in try/except if needed
+     phones = ["_"] + phones + ["_"]
+     tones = [0] + tones + [0]
+     word2ph = [1] + word2ph + [1]
+     return phones, tones, word2ph
+
+
+ def _get_initials_finals(word):
+     initials = []
+     finals = []
+     orig_initials = lazy_pinyin(word, neutral_tone_with_five=True, style=Style.INITIALS)
+     orig_finals = lazy_pinyin(
+         word, neutral_tone_with_five=True, style=Style.FINALS_TONE3
+     )
+     for c, v in zip(orig_initials, orig_finals):
+         initials.append(c)
+         finals.append(v)
+     return initials, finals
+
+
+ def _g2p(segments):
+     phones_list = []
+     tones_list = []
+     word2ph = []
+     for seg in segments:
+         # Remove all English words in the sentence
+         seg = re.sub("[a-zA-Z]+", "", seg)
+         seg_cut = psg.lcut(seg)
+         initials = []
+         finals = []
+         seg_cut = tone_modifier.pre_merge_for_modify(seg_cut)
+         for word, pos in seg_cut:
+             if pos == "eng":
+                 continue
+             sub_initials, sub_finals = _get_initials_finals(word)
+             sub_finals = tone_modifier.modified_tone(word, pos, sub_finals)
+             initials.append(sub_initials)
+             finals.append(sub_finals)
+
+             # assert len(sub_initials) == len(sub_finals) == len(word)
+         initials = sum(initials, [])
+         finals = sum(finals, [])
+
+         for c, v in zip(initials, finals):
+             raw_pinyin = c + v
+             # NOTE: post process for pypinyin outputs
+             # we discriminate i, ii and iii
+             if c == v:
+                 assert c in punctuation
+                 phone = [c]
+                 tone = "0"
+                 word2ph.append(1)
+             else:
+                 v_without_tone = v[:-1]
+                 tone = v[-1]
+
+                 pinyin = c + v_without_tone
+                 assert tone in "12345"
+
+                 if c:
+                     # syllable has an initial: merge compound finals
+                     v_rep_map = {
+                         "uei": "ui",
+                         "iou": "iu",
+                         "uen": "un",
+                     }
+                     if v_without_tone in v_rep_map.keys():
+                         pinyin = c + v_rep_map[v_without_tone]
+                 else:
+                     # bare final (no initial)
+                     pinyin_rep_map = {
+                         "ing": "ying",
+                         "i": "yi",
+                         "in": "yin",
+                         "u": "wu",
+                     }
+                     if pinyin in pinyin_rep_map.keys():
+                         pinyin = pinyin_rep_map[pinyin]
+                     else:
+                         single_rep_map = {
+                             "v": "yu",
+                             "e": "e",
+                             "i": "y",
+                             "u": "w",
+                         }
+                         if pinyin[0] in single_rep_map.keys():
+                             pinyin = single_rep_map[pinyin[0]] + pinyin[1:]
+
+                 assert pinyin in pinyin_to_symbol_map.keys(), (pinyin, seg, raw_pinyin)
+                 phone = pinyin_to_symbol_map[pinyin].split(" ")
+                 word2ph.append(len(phone))
+
+             phones_list += phone
+             tones_list += [int(tone)] * len(phone)
+     return phones_list, tones_list, word2ph
+
+
+ def text_normalize(text):
+     numbers = re.findall(r"\d+(?:\.?\d+)?", text)
+     for number in numbers:
+         text = text.replace(number, cn2an.an2cn(number), 1)
+     text = replace_punctuation(text)
+     return text
+
+
+ def get_bert_feature(text, word2ph):
+     from text import chinese_bert
+
+     return chinese_bert.get_bert_feature(text, word2ph)
+
+
+ if __name__ == "__main__":
+     from text.chinese_bert import get_bert_feature
+
+     text = "啊!但是《原神》是由,米哈\游自主, [研发]的一款全.新开放世界.冒险游戏"
+     text = text_normalize(text)
+     print(text)
+     phones, tones, word2ph = g2p(text)
+     bert = get_bert_feature(text, word2ph)
+
+     print(phones, tones, word2ph, bert.shape)
+
+
+ # # Example usage
+ # text = "这是一个示例文本:,你好!这是一个测试...."
+ # print(g2p_paddle(text))  # Output: 这是一个示例文本你好这是一个测试
text/chinese_bert.py ADDED
@@ -0,0 +1,100 @@
+ import torch
+ import sys
+ from transformers import AutoTokenizer, AutoModelForMaskedLM
+
+ tokenizer = AutoTokenizer.from_pretrained("./bert/chinese-roberta-wwm-ext-large")
+
+ models = dict()
+
+
+ def get_bert_feature(text, word2ph, device=None):
+     if (
+         sys.platform == "darwin"
+         and torch.backends.mps.is_available()
+         and device == "cpu"
+     ):
+         device = "mps"
+     if not device:
+         device = "cuda"
+     if device not in models.keys():
+         models[device] = AutoModelForMaskedLM.from_pretrained(
+             "./bert/chinese-roberta-wwm-ext-large"
+         ).to(device)
+     with torch.no_grad():
+         inputs = tokenizer(text, return_tensors="pt")
+         for i in inputs:
+             inputs[i] = inputs[i].to(device)
+         res = models[device](**inputs, output_hidden_states=True)
+         # take the third-to-last hidden layer as the word-level feature
+         res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()
+
+     assert len(word2ph) == len(text) + 2
+     word2phone = word2ph
+     phone_level_feature = []
+     for i in range(len(word2phone)):
+         repeat_feature = res[i].repeat(word2phone[i], 1)
+         phone_level_feature.append(repeat_feature)
+
+     phone_level_feature = torch.cat(phone_level_feature, dim=0)
+
+     return phone_level_feature.T
+
+
+ if __name__ == "__main__":
+     word_level_feature = torch.rand(38, 1024)  # 38 words, each a 1024-dim feature
+     word2phone = [
+         1, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1,
+         2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1,
+     ]
+
+     # total number of phone-level frames
+     total_frames = sum(word2phone)
+     print(word_level_feature.shape)
+     print(word2phone)
+     phone_level_feature = []
+     for i in range(len(word2phone)):
+         print(word_level_feature[i].shape)
+
+         # repeat each word's feature word2phone[i] times
+         repeat_feature = word_level_feature[i].repeat(word2phone[i], 1)
+         phone_level_feature.append(repeat_feature)
+
+     phone_level_feature = torch.cat(phone_level_feature, dim=0)
+     print(phone_level_feature.shape)  # (sum(word2phone), 1024) == torch.Size([65, 1024])
text/cleaner.py ADDED
@@ -0,0 +1,28 @@
+ from text import chinese, japanese, cleaned_text_to_sequence
+
+
+ language_module_map = {"ZH": chinese, "JP": japanese}
+
+
+ def clean_text(text, language):
+     language_module = language_module_map[language]
+     norm_text = language_module.text_normalize(text)
+     phones, tones, word2ph = language_module.g2p(norm_text)
+     return norm_text, phones, tones, word2ph
+
+
+ def clean_text_bert(text, language):
+     language_module = language_module_map[language]
+     norm_text = language_module.text_normalize(text)
+     phones, tones, word2ph = language_module.g2p(norm_text)
+     bert = language_module.get_bert_feature(norm_text, word2ph)
+     return phones, tones, bert
+
+
+ def text_to_sequence(text, language):
+     norm_text, phones, tones, word2ph = clean_text(text, language)
+     return cleaned_text_to_sequence(phones, tones, language)
+
+
+ if __name__ == "__main__":
+     pass
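`clean_text` is the single entry point shared by preprocessing and inference. A hedged example for the Chinese path; it requires the ZH dependencies (jieba, pypinyin, cn2an) to be installed:

```python
# Hedged example of the Chinese cleaning path.
from text.cleaner import clean_text

norm_text, phones, tones, word2ph = clean_text("你好,世界!", "ZH")
print(norm_text)                    # normalized text with punctuation mapped
print(phones[:8], tones[:8], word2ph)
```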
text/cmudict.rep ADDED
The diff for this file is too large to render.
text/cmudict_cache.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9b21b20325471934ba92f2e4a5976989e7d920caa32e7a286eacb027d197949
+ size 6212655