Upload 8 files

- README.md +36 -0
- configs/config_jp_extra.json +86 -0
- pretrained_jp_extra/DUR_0.safetensors +3 -0
- pretrained_jp_extra/D_0.safetensors +3 -0
- pretrained_jp_extra/G_0.safetensors +3 -0
- pretrained_jp_extra/WD_0.safetensors +3 -0
- style_bert_vits2/nlp/japanese/normalizer.py +176 -0
- style_bert_vits2/nlp/symbols.py +199 -0
README.md
ADDED
@@ -0,0 +1,36 @@
+
+Pretrained model jp_extra_large_Ver20240627_20240630 for Style-Bert-VITS2
+
+
+====Permitted====
+1: Uploading (reposting) to sites where an unspecified number of people can download it, such as GitHub or Hugging Face.
+
+2: Claiming you made this pretrained model yourself, and advertising, distributing, or selling it as your own work.
+
+3: Modifying the prohibited items or changing the license (if you claim it as your own work, feel free to change them).
+
+
+====Prohibited====
+1: Reposting without stating the original source.
+*To prevent people from being directed to malicious sites.
+
+2: Reposting without including information about the original uploader and developer.
+*So that someone who merely reposted is not confused with the developer.
+**And because things would turn chaotic if someone claiming the model as their own appeared.
+
+
+====Usage notes====
+A total of 10 usable symbols have been added: : ; = # < > ^ ( ) *
+
+Differences tend to show up when a large amount of data is used for fine-tuning.
+
+Training is assumed to run in an environment with at least 16 GB of VRAM.
+
+G_XXXXX.pth is about 1.4 GB.
+
+XXXX_eYYY_sZZZZZZ.safetensors is about 400 MB.
+
+
+====How to use====
+Overwrite the files into the corresponding folders.
+
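A minimal sketch of the "How to use" step above, assuming this upload has been downloaded next to a local Style-Bert-VITS2 checkout at `./Style-Bert-VITS2` (that destination path is an assumption; adjust it to your install):

```python
import shutil
from pathlib import Path

# Assumed destination: a local Style-Bert-VITS2 checkout (adjust as needed).
REPO = Path("./Style-Bert-VITS2")

# The uploaded files, each overwriting its counterpart in the same folder.
FILES = [
    "configs/config_jp_extra.json",
    "pretrained_jp_extra/DUR_0.safetensors",
    "pretrained_jp_extra/D_0.safetensors",
    "pretrained_jp_extra/G_0.safetensors",
    "pretrained_jp_extra/WD_0.safetensors",
    "style_bert_vits2/nlp/japanese/normalizer.py",
    "style_bert_vits2/nlp/symbols.py",
]

for rel in FILES:
    dst = REPO / rel
    dst.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy2(rel, dst)  # copy2 overwrites an existing file in place
    print(f"copied {rel} -> {dst}")
```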
configs/config_jp_extra.json
ADDED
@@ -0,0 +1,86 @@
+{
+  "model_name": "Dummy",
+  "train": {
+    "log_interval": 500,
+    "eval_interval": 10000,
+    "seed": 42,
+    "epochs": 200,
+    "learning_rate": 9e-05,
+    "betas": [
+      0.8,
+      0.99
+    ],
+    "eps": 1e-09,
+    "batch_size": 3,
+    "bf16_run": false,
+    "fp16_run": false,
+    "lr_decay": 0.99996,
+    "segment_size": 16384,
+    "init_lr_ratio": 1,
+    "warmup_epochs": 0,
+    "c_mel": 45,
+    "c_kl": 1.0,
+    "c_commit": 100,
+    "skip_optimizer": false,
+    "freeze_ZH_bert": false,
+    "freeze_JP_bert": false,
+    "freeze_EN_bert": false,
+    "freeze_emo": false,
+    "freeze_style": false,
+    "freeze_decoder": false
+  },
+  "data": {
+    "use_jp_extra": true,
+    "training_files": "Data/Dummy/train.list",
+    "validation_files": "Data/Dummy/val.list",
+    "max_wav_value": 32768.0,
+    "sampling_rate": 44100,
+    "filter_length": 2048,
+    "hop_length": 512,
+    "win_length": 2048,
+    "n_mel_channels": 256,
+    "mel_fmin": 0.0,
+    "mel_fmax": null,
+    "add_blank": true,
+    "n_speakers": 1,
+    "cleaned_text": true,
+    "spk2id": {
+      ",00,": 0
+    }
+  },
+  "model": {
+    "use_spk_conditioned_encoder": true,
+    "use_noise_scaled_mas": true,
+    "use_mel_posterior_encoder": true,
+    "use_duration_discriminator": true,
+    "use_wavlm_discriminator": true,
+    "inter_channels": 256,
+    "hidden_channels": 256,
+    "filter_channels": 1024,
+    "n_heads": 4,
+    "n_layers": 6,
+    "kernel_size": 3,
+    "p_dropout": 0.1,
+    "resblock": "1",
+    "resblock_kernel_sizes": [3, 7, 11],
+    "resblock_dilation_sizes": [
+      [1, 3, 5],
+      [1, 3, 5],
+      [1, 3, 5]
+    ],
+    "upsample_rates": [8, 8, 2, 2, 2],
+    "upsample_initial_channel": 512,
+    "upsample_kernel_sizes": [16, 16, 8, 2, 2],
+    "n_layers_q": 3,
+    "use_spectral_norm": false,
+    "gin_channels": 768,
+    "slm": {
+      "model": "./slm/wavlm-base-plus",
+      "sr": 16000,
+      "hidden": 768,
+      "nlayers": 13,
+      "initial_channel": 64
+    }
+  },
+  "version": "2.5.0-JP-Extra"
+}
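Two quick consistency checks on the config above: the decoder's `upsample_rates` must multiply back to `hop_length` (8·8·2·2·2 = 512), and `segment_size` must be a multiple of `hop_length` so training slices align to frame boundaries. A minimal sketch, assuming the file sits at `configs/config_jp_extra.json`:

```python
import json
import math
from pathlib import Path

cfg = json.loads(Path("configs/config_jp_extra.json").read_text(encoding="utf-8"))

hop = cfg["data"]["hop_length"]          # 512
sr = cfg["data"]["sampling_rate"]        # 44100
rates = cfg["model"]["upsample_rates"]   # [8, 8, 2, 2, 2]

# The decoder turns one latent frame back into hop_length waveform samples.
assert math.prod(rates) == hop, "upsample_rates must multiply to hop_length"

# Training segments are cut in whole frames.
assert cfg["train"]["segment_size"] % hop == 0

print(f"{sr / hop:.2f} frames per second")  # ~86.13
```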
pretrained_jp_extra/DUR_0.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d44a7c1f62662ef7b24ef7464b4b06ad3db0b8f7791f16f03abe957056b277d
+size 8680228
pretrained_jp_extra/D_0.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2835f76e6762c6c4840c6eb5cc8bac6ce7d0ca7ff7e1a4bce728690db467ff6
+size 187000064
pretrained_jp_extra/G_0.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76cddf68770aa94b10e625a0fd7f86ddd29bde3960a288e1421451130dc9f05a
+size 477947964
pretrained_jp_extra/WD_0.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d1a6c02aebedf6f47c8b5af5db4e6fdb18c02d7afe343671ff2a9953384bb6e
+size 4695736
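The four `.safetensors` entries above are Git LFS pointer files, not the weights themselves; `git lfs pull` (or Hugging Face's downloader) fetches the real binaries. A downloaded file can be checked against its pointer's `oid` and `size`, as in this sketch (the local path is an assumption):

```python
import hashlib
from pathlib import Path

def matches_lfs_pointer(path: str, sha256_hex: str, size: int) -> bool:
    """Return True if the file matches the Git LFS pointer's oid and size."""
    p = Path(path)
    if p.stat().st_size != size:
        return False
    h = hashlib.sha256()
    with p.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)
    return h.hexdigest() == sha256_hex

# Values copied from the WD_0.safetensors pointer in this commit.
print(matches_lfs_pointer(
    "pretrained_jp_extra/WD_0.safetensors",
    "9d1a6c02aebedf6f47c8b5af5db4e6fdb18c02d7afe343671ff2a9953384bb6e",
    4695736,
))
```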
style_bert_vits2/nlp/japanese/normalizer.py
ADDED
@@ -0,0 +1,176 @@
+"""
+Changed how the symbol normalization map handles ;, :, 「 」, and brackets in general.
+Added =, <, >, #, ^, and * to the symbol normalization map.
+
+
+"""
+
+import re
+import unicodedata
+
+from num2words import num2words
+
+from style_bert_vits2.nlp.symbols import PUNCTUATIONS
+
+
+def normalize_text(text: str) -> str:
+    """
+    Normalize Japanese text.
+    The result consists of exactly the following characters:
+    - hiragana
+    - katakana (including the full-width long vowel mark "ー"!)
+    - kanji
+    - half-width alphabet (upper and lower case)
+    - Greek letters
+    - `.` (from the period `。`, parts of `…`, line breaks, etc.)
+    - `,` (from the comma `、`, `:`, etc.)
+    - `?` (from the question mark `?`)
+    - `!` (from the exclamation mark `!`)
+    - `'` (from `「`, `」`, etc.)
+    - `-` (from `―` (a dash, not the long vowel mark), `-`, etc.)
+
+    Caveats:
+    - The ellipsis `…` becomes `...` (`なるほど…。` → `なるほど....`)
+    - Numbers are converted to kanji (`1,100円` → `千百円`, `52.34` → `五十二点三四`)
+    - The position and count of commas, question marks, etc. are preserved (`??あ、、!!!` → `??あ,,!!!`)
+
+    Args:
+        text (str): Text to normalize
+
+    Returns:
+        str: Normalized text
+    """
+
+    res = unicodedata.normalize("NFKC", text)  # alphabet characters become half-width here
+    res = __convert_numbers_to_words(res)  # "100円" → "百円", etc.
+    # Treat "~", "~", and "〜" as the long vowel mark as well
+    res = res.replace("~", "ー")
+    res = res.replace("~", "ー")
+    res = res.replace("〜", "ー")
+
+    res = replace_punctuation(res)  # normalize punctuation, remove unreadable characters
+
+    # Remove combining dakuten and handakuten
+    # Regular characters such as "ば" are kept as-is; "あ゛" becomes "あ゙" above and "あ" here
+    res = res.replace("\u3099", "")  # remove combining dakuten: る゙ → る
+    res = res.replace("\u309A", "")  # remove combining handakuten: な゚ → な
+    return res
+
+
+def replace_punctuation(text: str) -> str:
+    """
+    Normalize punctuation to ".", ",", "!", "?", "'", and "-", and keep only characters
+    whose readings OpenJTalk can produce: kanji, hiragana, katakana, the alphabet, and Greek letters.
+
+    Args:
+        text (str): Text to normalize
+
+    Returns:
+        str: Normalized text
+    """
+
+    # Symbol normalization map
+    REPLACE_MAP = {
+        ":": ":",
+        ";": ";",
+        ",": ",",
+        "。": ".",
+        "!": "!",
+        "?": "?",
+        "\n": ".",
+        ".": ".",
+        "…": "...",
+        "···": "...",
+        "・・・": "...",
+        "·": ",",
+        "・": ",",
+        "、": ",",
+        "$": ".",
+        "“": "'",
+        "”": "'",
+        '"': "'",
+        "‘": "'",
+        "’": "'",
+        "(": "(",
+        ")": ")",
+        "(": "(",
+        ")": ")",
+        "《": "(",
+        "》": ")",
+        "【": "(",
+        "】": ")",
+        "[": "(",
+        "]": ")",
+        # Convert every hyphen/dash variant remaining after NFKC normalization to the plain half-width hyphen - \u002d
+        "\u02d7": "\u002d",  # ˗, Modifier Letter Minus Sign
+        "\u2010": "\u002d",  # ‐, Hyphen,
+        # "\u2011": "\u002d",  # ‑, Non-Breaking Hyphen, converted to \u2010 by NFKC
+        "\u2012": "\u002d",  # ‒, Figure Dash
+        "\u2013": "\u002d",  # –, En Dash
+        "\u2014": "\u002d",  # —, Em Dash
+        "\u2015": "\u002d",  # ―, Horizontal Bar
+        "\u2043": "\u002d",  # ⁃, Hyphen Bullet
+        "\u2212": "\u002d",  # −, Minus Sign
+        "\u23af": "\u002d",  # ⎯, Horizontal Line Extension
+        "\u23e4": "\u002d",  # ⏤, Straightness
+        "\u2500": "\u002d",  # ─, Box Drawings Light Horizontal
+        "\u2501": "\u002d",  # ━, Box Drawings Heavy Horizontal
+        "\u2e3a": "\u002d",  # ⸺, Two-Em Dash
+        "\u2e3b": "\u002d",  # ⸻, Three-Em Dash
+        # "~": "-",  # changed: now treated as the long vowel mark "ー"
+        # "~": "-",  # also changed: treated as the long vowel mark "ー"
+        "「": "'",
+        "」": "'",
+        "=": "=",
+        "<": "<",
+        ">": ">",
+        "#": "#",
+        "^": "^",
+        "*": "*",
+    }
+
+    pattern = re.compile("|".join(re.escape(p) for p in REPLACE_MAP.keys()))
+
+    # Replace punctuation via the map
+    replaced_text = pattern.sub(lambda x: REPLACE_MAP[x.group()], text)
+
+    replaced_text = re.sub(
+        # ↓ hiragana, katakana, kanji
+        r"[^\u3040-\u309F\u30A0-\u30FF\u4E00-\u9FFF\u3400-\u4DBF\u3005"
+        # ↓ half-width alphabet (upper and lower case)
+        + r"\u0041-\u005A\u0061-\u007A"
+        # ↓ full-width alphabet (upper and lower case)
+        + r"\uFF21-\uFF3A\uFF41-\uFF5A"
+        # ↓ Greek letters
+        + r"\u0370-\u03FF\u1F00-\u1FFF"
+        # ↓ "!", "?", "…", ",", ".", "'", "-"; note "…" has already been converted to "..."
+        + "".join(PUNCTUATIONS) + r"]+",
+        # remove characters other than the above
+        "",
+        replaced_text,
+    )
+
+    return replaced_text
+
+
+def __convert_numbers_to_words(text: str) -> str:
+    """
+    Convert symbols and numbers to their Japanese word form.
+
+    Args:
+        text (str): Text to convert
+
+    Returns:
+        str: Converted text
+    """
+
+    NUMBER_WITH_SEPARATOR_PATTERN = re.compile("[0-9]{1,3}(,[0-9]{3})+")
+    CURRENCY_MAP = {"$": "ドル", "¥": "円", "£": "ポンド", "€": "ユーロ"}
+    CURRENCY_PATTERN = re.compile(r"([$¥£€])([0-9.]*[0-9])")
+    NUMBER_PATTERN = re.compile(r"[0-9]+(\.[0-9]+)?")
+
+    res = NUMBER_WITH_SEPARATOR_PATTERN.sub(lambda m: m[0].replace(",", ""), text)
+    res = CURRENCY_PATTERN.sub(lambda m: m[2] + CURRENCY_MAP.get(m[1], m[1]), res)
+    res = NUMBER_PATTERN.sub(lambda m: num2words(m[0], lang="ja"), res)
+
+    return res
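A short usage sketch for the normalizer above, assuming the modified package and `num2words` are importable; the expected outputs follow the docstring's own examples:

```python
from style_bert_vits2.nlp.japanese.normalizer import normalize_text

# Numbers become kanji, punctuation is normalized, the rest is dropped.
print(normalize_text("1,100円です…。"))  # expected: 千百円です....

# Position and count of punctuation marks are preserved.
print(normalize_text("??あ、、!!!"))  # expected: ??あ,,!!!

# The newly mapped symbols now survive normalization instead of being deleted.
print(normalize_text("a=b<c>#^*"))  # expected: a=b<c>#^*
```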
style_bert_vits2/nlp/symbols.py
ADDED
@@ -0,0 +1,199 @@
+"""
+Added ":", ";", "=", "#", "<", ">", "^", "(", ")", "*" to PUNCTUATIONS
+
+"""
+
+# Punctuations
+PUNCTUATIONS = ["!", "?", "…", ",", ".", "'", "-", ":", ";", "=", "#", "<", ">", "^", "(", ")", "*"]
+
+# Punctuations and special tokens
+PUNCTUATION_SYMBOLS = PUNCTUATIONS + ["SP", "UNK"]
+
+# Padding
+PAD = "_"
+
+# Chinese symbols
+ZH_SYMBOLS = [
+    "E",
+    "En",
+    "a",
+    "ai",
+    "an",
+    "ang",
+    "ao",
+    "b",
+    "c",
+    "ch",
+    "d",
+    "e",
+    "ei",
+    "en",
+    "eng",
+    "er",
+    "f",
+    "g",
+    "h",
+    "i",
+    "i0",
+    "ia",
+    "ian",
+    "iang",
+    "iao",
+    "ie",
+    "in",
+    "ing",
+    "iong",
+    "ir",
+    "iu",
+    "j",
+    "k",
+    "l",
+    "m",
+    "n",
+    "o",
+    "ong",
+    "ou",
+    "p",
+    "q",
+    "r",
+    "s",
+    "sh",
+    "t",
+    "u",
+    "ua",
+    "uai",
+    "uan",
+    "uang",
+    "ui",
+    "un",
+    "uo",
+    "v",
+    "van",
+    "ve",
+    "vn",
+    "w",
+    "x",
+    "y",
+    "z",
+    "zh",
+    "AA",
+    "EE",
+    "OO",
+]
+NUM_ZH_TONES = 6
+
+# Japanese
+JP_SYMBOLS = [
+    "N",
+    "a",
+    "a:",
+    "b",
+    "by",
+    "ch",
+    "d",
+    "dy",
+    "e",
+    "e:",
+    "f",
+    "g",
+    "gy",
+    "h",
+    "hy",
+    "i",
+    "i:",
+    "j",
+    "k",
+    "ky",
+    "m",
+    "my",
+    "n",
+    "ny",
+    "o",
+    "o:",
+    "p",
+    "py",
+    "q",
+    "r",
+    "ry",
+    "s",
+    "sh",
+    "t",
+    "ts",
+    "ty",
+    "u",
+    "u:",
+    "w",
+    "y",
+    "z",
+    "zy",
+]
+NUM_JP_TONES = 2
+
+# English
+EN_SYMBOLS = [
+    "aa",
+    "ae",
+    "ah",
+    "ao",
+    "aw",
+    "ay",
+    "b",
+    "ch",
+    "d",
+    "dh",
+    "eh",
+    "er",
+    "ey",
+    "f",
+    "g",
+    "hh",
+    "ih",
+    "iy",
+    "jh",
+    "k",
+    "l",
+    "m",
+    "n",
+    "ng",
+    "ow",
+    "oy",
+    "p",
+    "r",
+    "s",
+    "sh",
+    "t",
+    "th",
+    "uh",
+    "uw",
+    "V",
+    "w",
+    "y",
+    "z",
+    "zh",
+]
+NUM_EN_TONES = 4
+
+# Combine all symbols
+NORMAL_SYMBOLS = sorted(set(ZH_SYMBOLS + JP_SYMBOLS + EN_SYMBOLS))
+SYMBOLS = [PAD] + NORMAL_SYMBOLS + PUNCTUATION_SYMBOLS
+SIL_PHONEMES_IDS = [SYMBOLS.index(i) for i in PUNCTUATION_SYMBOLS]
+
+# Combine all tones
+NUM_TONES = NUM_ZH_TONES + NUM_JP_TONES + NUM_EN_TONES
+
+# Language maps
+LANGUAGE_ID_MAP = {"ZH": 0, "JP": 1, "EN": 2}
+NUM_LANGUAGES = len(LANGUAGE_ID_MAP.keys())
+
+# Language tone start map
+LANGUAGE_TONE_START_MAP = {
+    "ZH": 0,
+    "JP": NUM_ZH_TONES,
+    "EN": NUM_ZH_TONES + NUM_JP_TONES,
+}
+
+
+if __name__ == "__main__":
+    a = set(ZH_SYMBOLS)
+    b = set(EN_SYMBOLS)
+    print(sorted(a & b))
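A small sketch of how the tables above fit together, assuming the modified symbols.py is importable: phonemes are looked up in the combined `SYMBOLS` list, and each language's tones are offset via `LANGUAGE_TONE_START_MAP` so all three languages share one tone index space:

```python
from style_bert_vits2.nlp.symbols import (
    LANGUAGE_TONE_START_MAP,
    NUM_TONES,
    PUNCTUATIONS,
    SYMBOLS,
)

# Phoneme -> ID lookup over the combined symbol table.
symbol_to_id = {s: i for i, s in enumerate(SYMBOLS)}
print(symbol_to_id["ky"], symbol_to_id["#"])  # a JP phoneme and an added symbol

# JP tone 1 maps to global tone index 6 + 1 = 7 (ZH occupies indices 0-5).
jp_tone = LANGUAGE_TONE_START_MAP["JP"] + 1
assert 0 <= jp_tone < NUM_TONES  # NUM_TONES = 6 + 2 + 4 = 12

# All ten symbols added by this commit are present in PUNCTUATIONS.
assert all(s in PUNCTUATIONS for s in [":", ";", "=", "#", "<", ">", "^", "(", ")", "*"])
```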