SohomToom committed
Commit 54f6b5e · verified · Parent: cace103

Update MeloTTS/melo/text/english.py

Files changed (1):
  1. MeloTTS/melo/text/english.py +284 -284
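Only two lines actually change in this commit: the `from .japanese import distribute_phone` import is commented out, and its only call site in `g2p` becomes `aaa = None`. Every other line is unchanged context.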
MeloTTS/melo/text/english.py CHANGED
@@ -1,284 +1,284 @@
 import pickle
 import os
 import re
 from g2p_en import G2p
 
 from . import symbols
 
 from .english_utils.abbreviations import expand_abbreviations
 from .english_utils.time_norm import expand_time_english
 from .english_utils.number_norm import normalize_numbers
-from .japanese import distribute_phone
+#from .japanese import distribute_phone
 
 from transformers import AutoTokenizer
 
 current_file_path = os.path.dirname(__file__)
 CMU_DICT_PATH = os.path.join(current_file_path, "cmudict.rep")
 CACHE_PATH = os.path.join(current_file_path, "cmudict_cache.pickle")
 _g2p = G2p()
 
 arpa = {
     "AH0",
     "S",
     "AH1",
     "EY2",
     "AE2",
     "EH0",
     "OW2",
     "UH0",
     "NG",
     "B",
     "G",
     "AY0",
     "M",
     "AA0",
     "F",
     "AO0",
     "ER2",
     "UH1",
     "IY1",
     "AH2",
     "DH",
     "IY0",
     "EY1",
     "IH0",
     "K",
     "N",
     "W",
     "IY2",
     "T",
     "AA1",
     "ER1",
     "EH2",
     "OY0",
     "UH2",
     "UW1",
     "Z",
     "AW2",
     "AW1",
     "V",
     "UW2",
     "AA2",
     "ER",
     "AW0",
     "UW0",
     "R",
     "OW1",
     "EH1",
     "ZH",
     "AE0",
     "IH2",
     "IH",
     "Y",
     "JH",
     "P",
     "AY1",
     "EY0",
     "OY2",
     "TH",
     "HH",
     "D",
     "ER0",
     "CH",
     "AO1",
     "AE1",
     "AO2",
     "OY1",
     "AY2",
     "IH1",
     "OW0",
     "L",
     "SH",
 }
 
 
 def post_replace_ph(ph):
     rep_map = {
         ":": ",",
         ";": ",",
         ",": ",",
         "。": ".",
         "!": "!",
         "?": "?",
         "\n": ".",
         "·": ",",
         "、": ",",
         "...": "…",
         "v": "V",
     }
     if ph in rep_map.keys():
         ph = rep_map[ph]
     if ph in symbols:
         return ph
     if ph not in symbols:
         ph = "UNK"
     return ph
 
 
 def read_dict():
     g2p_dict = {}
     start_line = 49
     with open(CMU_DICT_PATH) as f:
         line = f.readline()
         line_index = 1
         while line:
             if line_index >= start_line:
                 line = line.strip()
                 word_split = line.split(" ")
                 word = word_split[0]
 
                 syllable_split = word_split[1].split(" - ")
                 g2p_dict[word] = []
                 for syllable in syllable_split:
                     phone_split = syllable.split(" ")
                     g2p_dict[word].append(phone_split)
 
             line_index = line_index + 1
             line = f.readline()
 
     return g2p_dict
 
 
 def cache_dict(g2p_dict, file_path):
     with open(file_path, "wb") as pickle_file:
         pickle.dump(g2p_dict, pickle_file)
 
 
 def get_dict():
     if os.path.exists(CACHE_PATH):
         with open(CACHE_PATH, "rb") as pickle_file:
             g2p_dict = pickle.load(pickle_file)
     else:
         g2p_dict = read_dict()
         cache_dict(g2p_dict, CACHE_PATH)
 
     return g2p_dict
 
 
 eng_dict = get_dict()
 
 
 def refine_ph(phn):
     tone = 0
     if re.search(r"\d$", phn):
         tone = int(phn[-1]) + 1
         phn = phn[:-1]
     return phn.lower(), tone
 
 
 def refine_syllables(syllables):
     tones = []
     phonemes = []
     for phn_list in syllables:
         for i in range(len(phn_list)):
             phn = phn_list[i]
             phn, tone = refine_ph(phn)
             phonemes.append(phn)
             tones.append(tone)
     return phonemes, tones
 
 
 def text_normalize(text):
     text = text.lower()
     text = expand_time_english(text)
     text = normalize_numbers(text)
     text = expand_abbreviations(text)
     return text
 
 model_id = 'bert-base-uncased'
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 def g2p_old(text):
     tokenized = tokenizer.tokenize(text)
     # import pdb; pdb.set_trace()
     phones = []
     tones = []
     words = re.split(r"([,;.\-\?\!\s+])", text)
     for w in words:
         if w.upper() in eng_dict:
             phns, tns = refine_syllables(eng_dict[w.upper()])
             phones += phns
             tones += tns
         else:
             phone_list = list(filter(lambda p: p != " ", _g2p(w)))
             for ph in phone_list:
                 if ph in arpa:
                     ph, tn = refine_ph(ph)
                     phones.append(ph)
                     tones.append(tn)
                 else:
                     phones.append(ph)
                     tones.append(0)
     # todo: implement word2ph
     word2ph = [1 for i in phones]
 
     phones = [post_replace_ph(i) for i in phones]
     return phones, tones, word2ph
 
 def g2p(text, pad_start_end=True, tokenized=None):
     if tokenized is None:
         tokenized = tokenizer.tokenize(text)
     # import pdb; pdb.set_trace()
     phs = []
     ph_groups = []
     for t in tokenized:
         if not t.startswith("#"):
             ph_groups.append([t])
         else:
             ph_groups[-1].append(t.replace("#", ""))
 
     phones = []
     tones = []
     word2ph = []
     for group in ph_groups:
         w = "".join(group)
         phone_len = 0
         word_len = len(group)
         if w.upper() in eng_dict:
             phns, tns = refine_syllables(eng_dict[w.upper()])
             phones += phns
             tones += tns
             phone_len += len(phns)
         else:
             phone_list = list(filter(lambda p: p != " ", _g2p(w)))
             for ph in phone_list:
                 if ph in arpa:
                     ph, tn = refine_ph(ph)
                     phones.append(ph)
                     tones.append(tn)
                 else:
                     phones.append(ph)
                     tones.append(0)
                 phone_len += 1
-        aaa = distribute_phone(phone_len, word_len)
+        aaa = None #distribute_phone(phone_len, word_len)
         word2ph += aaa
     phones = [post_replace_ph(i) for i in phones]
 
     if pad_start_end:
         phones = ["_"] + phones + ["_"]
         tones = [0] + tones + [0]
         word2ph = [1] + word2ph + [1]
     return phones, tones, word2ph
 
 def get_bert_feature(text, word2ph, device=None):
     from text import english_bert
 
     return english_bert.get_bert_feature(text, word2ph, device=device)
 
 if __name__ == "__main__":
     # print(get_dict())
     # print(eng_word_to_phoneme("hello"))
     from text.english_bert import get_bert_feature
     text = "In this paper, we propose 1 DSPGAN, a N-F-T GAN-based universal vocoder."
     text = text_normalize(text)
     phones, tones, word2ph = g2p(text)
     import pdb; pdb.set_trace()
     bert = get_bert_feature(text, word2ph)
 
     print(phones, tones, word2ph, bert.shape)
 
     # all_phones = set()
     # for k, syllables in eng_dict.items():
     #     for group in syllables:
     #         for ph in group:
     #             all_phones.add(ph)
     # print(all_phones)
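
Note that after this change `aaa` is `None`, so the following `word2ph += aaa` in `g2p` will raise `TypeError: 'NoneType' object is not iterable` the first time `g2p` runs. If the intent is only to drop the dependency on the `.japanese` module, a local copy of the helper keeps `g2p` working. The sketch below is a minimal stand-in, not part of this commit, and the greedy even-split behavior is an assumption based on the upstream MeloTTS `distribute_phone`:

# Hypothetical local replacement for the commented-out import; the even-split
# semantics are assumed from upstream MeloTTS, not taken from this commit.
def distribute_phone(n_phone, n_word):
    # Spread n_phone phones as evenly as possible across n_word subword tokens.
    phones_per_word = [0] * n_word
    for _ in range(n_phone):
        # Assign each successive phone to the token holding the fewest so far.
        min_index = phones_per_word.index(min(phones_per_word))
        phones_per_word[min_index] += 1
    return phones_per_word

With this defined locally, restoring `aaa = distribute_phone(phone_len, word_len)` produces splits like `distribute_phone(5, 2) == [3, 2]`; the entries always sum to `phone_len`, so `word2ph` stays aligned with `phones` for the downstream `get_bert_feature` call.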