Ailyth committed
Commit d8137a5
1 Parent(s): b2a5005

0330-125210-improve_ENG_inference

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. AR/__pycache__/__init__.cpython-310.pyc +0 -0
  2. AR/models/__pycache__/__init__.cpython-310.pyc +0 -0
  3. AR/models/__pycache__/t2s_lightning_module.cpython-310.pyc +0 -0
  4. AR/models/__pycache__/t2s_model.cpython-310.pyc +0 -0
  5. AR/models/__pycache__/utils.cpython-310.pyc +0 -0
  6. AR/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  7. AR/modules/__pycache__/activation.cpython-310.pyc +0 -0
  8. AR/modules/__pycache__/embedding.cpython-310.pyc +0 -0
  9. AR/modules/__pycache__/lr_schedulers.cpython-310.pyc +0 -0
  10. AR/modules/__pycache__/optim.cpython-310.pyc +0 -0
  11. AR/modules/__pycache__/patched_mha_with_cache.cpython-310.pyc +0 -0
  12. AR/modules/__pycache__/scaling.cpython-310.pyc +0 -0
  13. AR/modules/__pycache__/transformer.cpython-310.pyc +0 -0
  14. __pycache__/download.cpython-310.pyc +0 -0
  15. __pycache__/info.cpython-310.pyc +0 -0
  16. __pycache__/my_utils.cpython-310.pyc +0 -0
  17. __pycache__/utils.cpython-310.pyc +0 -0
  18. app.py +15 -4
  19. feature_extractor/__pycache__/__init__.cpython-310.pyc +0 -0
  20. feature_extractor/__pycache__/cnhubert.cpython-310.pyc +0 -0
  21. feature_extractor/__pycache__/whisper_enc.cpython-310.pyc +0 -0
  22. gweight.txt +0 -1
  23. module/__pycache__/__init__.cpython-310.pyc +0 -0
  24. module/__pycache__/attentions.cpython-310.pyc +0 -0
  25. module/__pycache__/commons.cpython-310.pyc +0 -0
  26. module/__pycache__/core_vq.cpython-310.pyc +0 -0
  27. module/__pycache__/mel_processing.cpython-310.pyc +0 -0
  28. module/__pycache__/models.cpython-310.pyc +0 -0
  29. module/__pycache__/modules.cpython-310.pyc +0 -0
  30. module/__pycache__/mrte_model.cpython-310.pyc +0 -0
  31. module/__pycache__/quantize.cpython-310.pyc +0 -0
  32. module/__pycache__/transforms.cpython-310.pyc +0 -0
  33. requirements.txt +2 -1
  34. sweight.txt +0 -1
  35. text/__pycache__/__init__.cpython-310.pyc +0 -0
  36. text/__pycache__/chinese.cpython-310.pyc +0 -0
  37. text/__pycache__/cleaner.cpython-310.pyc +0 -0
  38. text/__pycache__/english.cpython-310.pyc +0 -0
  39. text/__pycache__/japanese.cpython-310.pyc +0 -0
  40. text/__pycache__/symbols.cpython-310.pyc +0 -0
  41. text/__pycache__/tone_sandhi.cpython-310.pyc +0 -0
  42. text/chinese.py +4 -2
  43. text/cmudict.rep +0 -0
  44. text/engdict-hot.rep +2 -1
  45. text/engdict_cache.pickle +2 -2
  46. text/english.py +171 -43
  47. output_audio.wav → text/namedict_cache.pickle +2 -2
  48. text/tone_sandhi.py +1 -0
  49. text/zh_normalization/__pycache__/__init__.cpython-310.pyc +0 -0
  50. text/zh_normalization/__pycache__/char_convert.cpython-310.pyc +0 -0
AR/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/AR/__pycache__/__init__.cpython-310.pyc and b/AR/__pycache__/__init__.cpython-310.pyc differ
 
AR/models/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/AR/models/__pycache__/__init__.cpython-310.pyc and b/AR/models/__pycache__/__init__.cpython-310.pyc differ
 
AR/models/__pycache__/t2s_lightning_module.cpython-310.pyc CHANGED
Binary files a/AR/models/__pycache__/t2s_lightning_module.cpython-310.pyc and b/AR/models/__pycache__/t2s_lightning_module.cpython-310.pyc differ
 
AR/models/__pycache__/t2s_model.cpython-310.pyc CHANGED
Binary files a/AR/models/__pycache__/t2s_model.cpython-310.pyc and b/AR/models/__pycache__/t2s_model.cpython-310.pyc differ
 
AR/models/__pycache__/utils.cpython-310.pyc CHANGED
Binary files a/AR/models/__pycache__/utils.cpython-310.pyc and b/AR/models/__pycache__/utils.cpython-310.pyc differ
 
AR/modules/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/AR/modules/__pycache__/__init__.cpython-310.pyc and b/AR/modules/__pycache__/__init__.cpython-310.pyc differ
 
AR/modules/__pycache__/activation.cpython-310.pyc CHANGED
Binary files a/AR/modules/__pycache__/activation.cpython-310.pyc and b/AR/modules/__pycache__/activation.cpython-310.pyc differ
 
AR/modules/__pycache__/embedding.cpython-310.pyc CHANGED
Binary files a/AR/modules/__pycache__/embedding.cpython-310.pyc and b/AR/modules/__pycache__/embedding.cpython-310.pyc differ
 
AR/modules/__pycache__/lr_schedulers.cpython-310.pyc CHANGED
Binary files a/AR/modules/__pycache__/lr_schedulers.cpython-310.pyc and b/AR/modules/__pycache__/lr_schedulers.cpython-310.pyc differ
 
AR/modules/__pycache__/optim.cpython-310.pyc CHANGED
Binary files a/AR/modules/__pycache__/optim.cpython-310.pyc and b/AR/modules/__pycache__/optim.cpython-310.pyc differ
 
AR/modules/__pycache__/patched_mha_with_cache.cpython-310.pyc CHANGED
Binary files a/AR/modules/__pycache__/patched_mha_with_cache.cpython-310.pyc and b/AR/modules/__pycache__/patched_mha_with_cache.cpython-310.pyc differ
 
AR/modules/__pycache__/scaling.cpython-310.pyc CHANGED
Binary files a/AR/modules/__pycache__/scaling.cpython-310.pyc and b/AR/modules/__pycache__/scaling.cpython-310.pyc differ
 
AR/modules/__pycache__/transformer.cpython-310.pyc CHANGED
Binary files a/AR/modules/__pycache__/transformer.cpython-310.pyc and b/AR/modules/__pycache__/transformer.cpython-310.pyc differ
 
__pycache__/download.cpython-310.pyc CHANGED
Binary files a/__pycache__/download.cpython-310.pyc and b/__pycache__/download.cpython-310.pyc differ
 
__pycache__/info.cpython-310.pyc CHANGED
Binary files a/__pycache__/info.cpython-310.pyc and b/__pycache__/info.cpython-310.pyc differ
 
__pycache__/my_utils.cpython-310.pyc CHANGED
Binary files a/__pycache__/my_utils.cpython-310.pyc and b/__pycache__/my_utils.cpython-310.pyc differ
 
__pycache__/utils.cpython-310.pyc CHANGED
Binary files a/__pycache__/utils.cpython-310.pyc and b/__pycache__/utils.cpython-310.pyc differ
 
app.py CHANGED
@@ -790,14 +790,21 @@ with gr.Blocks(theme='Kasien/ali_theme_custom') as app:
     chinese_choice = gr.Radio(chinese_models, label="ZH",scale=2)
     japanese_choice = gr.Radio(japanese_models, label="JA",scale=4)
 
-    plsh='Support【English/中文/日本語】,Input text you like / 輸入文字 /テキストを入力する'
+    plsh='''
+    Support【English/中文/日本語】,Input text here / 在这輸入文字 /ここにテキストを入力する。
+
+    If you don't know what to input, you can click the dice on the right, and random text will appear.
+    如果你不知道输入什么,可以点击右边的骰子,会出现随机文本。
+    入力するものがわからない場合は、右側のサイコロをクリックすると、ランダムなテキストが表示されます。
+
+    '''
     limit='Max 70 words. Excess will be ignored./单次最多处理120字左右,多余的会被忽略'
 
     gr.HTML('''
     <b>Input Text/输入文字</b>''')
     with gr.Row():
         with gr.Column(scale=2):
-            model_name = gr.Textbox(label="Seleted Model/已选模型", value=default_model_name, scale=1)
+            model_name = gr.Textbox(label="Seleted Model/已选模型", value=default_model_name, interactive=False,scale=1,)
             text_language = gr.Textbox(
                 label="Language for input text/生成语言",
                 info='Automatic detection of input language type.',scale=1,interactive=False
@@ -851,8 +858,12 @@ with gr.Blocks(theme='Kasien/ali_theme_custom') as app:
     If it generated silence, please try again./如果生成了空白声音,请重试
     <br><br><br><br>
     <h1 style="font-size: 25px;">Clone custom Voice/克隆自定义声音</h1>
-    <p style="margin-bottom: 10px; font-size: 100%">Need 3~10s audio.This involves voice-to-text conversion followed by text-to-voice conversion, so it takes longer time<br>
-    需要3~10秒语音,这个会涉及语音转文字,之后再转语音,所以耗时比较久
+    <p style="margin-bottom: 10px; font-size: 100%">
+    需要3~10秒语音,克隆后的声音和原音相似度80%以上<br>
+    Requires 3-10 seconds of voice input. The cloned voice will have a similarity of 80% or above compared to the original.<br>
+    3~10秒の音声入力が必要です。クローンされた音声は、オリジナルと80%以上の類似性があります。
+
+
     </p>''')
 
     with gr.Row():
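Note: the new plsh placeholder describes a dice button that fills the input box with random text. The wiring of that button is not part of this hunk; the sketch below is a hypothetical, minimal reconstruction of how such a button could be hooked up in Gradio (sample_texts and the button itself are assumptions, not code from this commit).

import random
import gradio as gr

sample_texts = [
    "The quick brown fox jumps over the lazy dog.",
    "你好,今天天气真不错。",
]

with gr.Blocks() as demo:
    inp = gr.Textbox(placeholder="Input text here...", label="Input Text/输入文字")
    dice = gr.Button("🎲")
    # clicking the dice replaces the textbox contents with a random sample
    dice.click(lambda: random.choice(sample_texts), outputs=inp)

demo.launch()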
feature_extractor/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/feature_extractor/__pycache__/__init__.cpython-310.pyc and b/feature_extractor/__pycache__/__init__.cpython-310.pyc differ
 
feature_extractor/__pycache__/cnhubert.cpython-310.pyc CHANGED
Binary files a/feature_extractor/__pycache__/cnhubert.cpython-310.pyc and b/feature_extractor/__pycache__/cnhubert.cpython-310.pyc differ
 
feature_extractor/__pycache__/whisper_enc.cpython-310.pyc CHANGED
Binary files a/feature_extractor/__pycache__/whisper_enc.cpython-310.pyc and b/feature_extractor/__pycache__/whisper_enc.cpython-310.pyc differ
 
gweight.txt DELETED
@@ -1 +0,0 @@
-/content/Multi-voice-TTS-GPT-SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt
 
 
module/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/module/__pycache__/__init__.cpython-310.pyc and b/module/__pycache__/__init__.cpython-310.pyc differ
 
module/__pycache__/attentions.cpython-310.pyc CHANGED
Binary files a/module/__pycache__/attentions.cpython-310.pyc and b/module/__pycache__/attentions.cpython-310.pyc differ
 
module/__pycache__/commons.cpython-310.pyc CHANGED
Binary files a/module/__pycache__/commons.cpython-310.pyc and b/module/__pycache__/commons.cpython-310.pyc differ
 
module/__pycache__/core_vq.cpython-310.pyc CHANGED
Binary files a/module/__pycache__/core_vq.cpython-310.pyc and b/module/__pycache__/core_vq.cpython-310.pyc differ
 
module/__pycache__/mel_processing.cpython-310.pyc CHANGED
Binary files a/module/__pycache__/mel_processing.cpython-310.pyc and b/module/__pycache__/mel_processing.cpython-310.pyc differ
 
module/__pycache__/models.cpython-310.pyc CHANGED
Binary files a/module/__pycache__/models.cpython-310.pyc and b/module/__pycache__/models.cpython-310.pyc differ
 
module/__pycache__/modules.cpython-310.pyc CHANGED
Binary files a/module/__pycache__/modules.cpython-310.pyc and b/module/__pycache__/modules.cpython-310.pyc differ
 
module/__pycache__/mrte_model.cpython-310.pyc CHANGED
Binary files a/module/__pycache__/mrte_model.cpython-310.pyc and b/module/__pycache__/mrte_model.cpython-310.pyc differ
 
module/__pycache__/quantize.cpython-310.pyc CHANGED
Binary files a/module/__pycache__/quantize.cpython-310.pyc and b/module/__pycache__/quantize.cpython-310.pyc differ
 
module/__pycache__/transforms.cpython-310.pyc CHANGED
Binary files a/module/__pycache__/transforms.cpython-310.pyc and b/module/__pycache__/transforms.cpython-310.pyc differ
 
requirements.txt CHANGED
@@ -27,4 +27,5 @@ torch
 pyicu
 morfessor
 pycld2
-polyglot
+polyglot
+wordsegment
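Note: the newly added wordsegment dependency is what the rewritten text/english.py (below) uses to split unknown compound words into dictionary words before phoneme lookup. A minimal sketch of its API; the exact outputs are corpus-driven and shown only as illustrations:

import wordsegment

wordsegment.load()                    # load the English unigram/bigram tables once
print(wordsegment.segment("openai"))  # e.g. ['open', 'ai']
print(wordsegment.segment("wordsegmentation"))  # e.g. ['word', 'segmentation']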
sweight.txt DELETED
@@ -1 +0,0 @@
-/content/Multi-voice-TTS-GPT-SoVITS/pretrained_models/s2G488k.pth
 
 
text/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/text/__pycache__/__init__.cpython-310.pyc and b/text/__pycache__/__init__.cpython-310.pyc differ
 
text/__pycache__/chinese.cpython-310.pyc CHANGED
Binary files a/text/__pycache__/chinese.cpython-310.pyc and b/text/__pycache__/chinese.cpython-310.pyc differ
 
text/__pycache__/cleaner.cpython-310.pyc CHANGED
Binary files a/text/__pycache__/cleaner.cpython-310.pyc and b/text/__pycache__/cleaner.cpython-310.pyc differ
 
text/__pycache__/english.cpython-310.pyc CHANGED
Binary files a/text/__pycache__/english.cpython-310.pyc and b/text/__pycache__/english.cpython-310.pyc differ
 
text/__pycache__/japanese.cpython-310.pyc CHANGED
Binary files a/text/__pycache__/japanese.cpython-310.pyc and b/text/__pycache__/japanese.cpython-310.pyc differ
 
text/__pycache__/symbols.cpython-310.pyc CHANGED
Binary files a/text/__pycache__/symbols.cpython-310.pyc and b/text/__pycache__/symbols.cpython-310.pyc differ
 
text/__pycache__/tone_sandhi.cpython-310.pyc CHANGED
Binary files a/text/__pycache__/tone_sandhi.cpython-310.pyc and b/text/__pycache__/tone_sandhi.cpython-310.pyc differ
 
text/chinese.py CHANGED
@@ -30,10 +30,12 @@ rep_map = {
     "\n": ".",
     "·": ",",
     "、": ",",
-    # "...": "…",
+    "...": "…",
     "$": ".",
     "/": ",",
     "—": "-",
+    "~": "…",
+    "~":"…",
 }
 
 tone_modifier = ToneSandhi()
@@ -169,4 +171,4 @@ if __name__ == "__main__":
 
     # # Example usage
     # text = "这是一个示例文本:,你好!这是一个测试..."
-    # print(g2p_paddle(text))  # Output: 这是一个示例文本你好这是一个测试
+    # print(g2p_paddle(text))  # Output: 这是一个示例文本你好这是一个测试
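Note: rep_map now folds "..." and both the half-width and full-width tildes into the ellipsis "…" before g2p. A minimal sketch of how such a mapping is applied, assuming the usual compiled-alternation substitution used for rep_map elsewhere in the file (the snippet below is a simplified stand-in, not the file's replace function):

import re

rep_map = {"...": "…", "~": "…", "~": "…"}  # excerpt of the updated mapping
# keys must be regex-escaped because "." is a metacharacter
pattern = re.compile("|".join(re.escape(k) for k in rep_map))
text = pattern.sub(lambda m: rep_map[m.group()], "好呀~一起走...")
print(text)  # 好呀…一起走…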
text/cmudict.rep CHANGED
The diff for this file is too large to render. See raw diff
 
text/engdict-hot.rep CHANGED
@@ -1 +1,2 @@
-CHATGPT CH AE1 T JH IY1 P IY1 T IY1
+CHATGPT CH AE1 T JH IY1 P IY1 T IY1
+JSON JH EY1 S AH0 N
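Note: each engdict-hot.rep row is a word followed by its ARPAbet phones; the new hot_reload_hot() in text/english.py (below) loads these rows last so they override the cached dictionary. A hypothetical standalone parse of the new row, assuming a single-space field separator as displayed:

line = "JSON JH EY1 S AH0 N"
word, *phones = line.strip().split(" ")
g2p_dict = {word.lower(): [phones]}   # same shape hot_reload_hot() produces
print(g2p_dict)  # {'json': [['JH', 'EY1', 'S', 'AH0', 'N']]}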
text/engdict_cache.pickle CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:36ea4c34f2ca534437a39db0fc4092efa0595093d3587bafc33fc50468ef53f9
-size 6530139
+oid sha256:9bff9393f4b192d873a11335efc8f124771087b6dc847d34fd240c2846889d2b
+size 5965909
text/english.py CHANGED
@@ -1,18 +1,26 @@
 import pickle
 import os
 import re
+import wordsegment
 from g2p_en import G2p
 
 from string import punctuation
 
 from text import symbols
 
+import unicodedata
+from builtins import str as unicode
+from g2p_en.expand import normalize_numbers
+from nltk.tokenize import TweetTokenizer
+word_tokenize = TweetTokenizer().tokenize
+from nltk import pos_tag
+
 current_file_path = os.path.dirname(__file__)
 CMU_DICT_PATH = os.path.join(current_file_path, "cmudict.rep")
 CMU_DICT_FAST_PATH = os.path.join(current_file_path, "cmudict-fast.rep")
 CMU_DICT_HOT_PATH = os.path.join(current_file_path, "engdict-hot.rep")
 CACHE_PATH = os.path.join(current_file_path, "engdict_cache.pickle")
-_g2p = G2p()
+NAMECACHE_PATH = os.path.join(current_file_path, "namedict_cache.pickle")
 
 arpa = {
     "AH0",
@@ -90,7 +98,7 @@ arpa = {
 
 
 def replace_phs(phs):
-    rep_map = {";": ",", ":": ",", "'": "-", '"': "-"}
+    rep_map = {"'": "-"}
     phs_new = []
     for ph in phs:
         if ph in symbols:
@@ -112,7 +120,7 @@ def read_dict():
             if line_index >= start_line:
                 line = line.strip()
                 word_split = line.split(" ")
-                word = word_split[0]
+                word = word_split[0].lower()
 
                 syllable_split = word_split[1].split(" - ")
                 g2p_dict[word] = []
@@ -132,16 +140,11 @@ def read_dict_new():
         line = f.readline()
         line_index = 1
         while line:
-            if line_index >= 49:
+            if line_index >= 57:
                 line = line.strip()
                 word_split = line.split(" ")
-                word = word_split[0]
-
-                syllable_split = word_split[1].split(" - ")
-                g2p_dict[word] = []
-                for syllable in syllable_split:
-                    phone_split = syllable.split(" ")
-                    g2p_dict[word].append(phone_split)
+                word = word_split[0].lower()
+                g2p_dict[word] = [word_split[1].split(" ")]
 
             line_index = line_index + 1
             line = f.readline()
@@ -153,14 +156,16 @@ def read_dict_new():
             if line_index >= 0:
                 line = line.strip()
                 word_split = line.split(" ")
-                word = word_split[0]
+                word = word_split[0].lower()
                 if word not in g2p_dict:
-                    g2p_dict[word] = []
-                    g2p_dict[word].append(word_split[1:])
+                    g2p_dict[word] = [word_split[1:]]
 
             line_index = line_index + 1
             line = f.readline()
 
+    return g2p_dict
+
+
+def hot_reload_hot(g2p_dict):
     with open(CMU_DICT_HOT_PATH) as f:
         line = f.readline()
         line_index = 1
@@ -168,14 +173,13 @@ def read_dict_new():
             if line_index >= 0:
                 line = line.strip()
                 word_split = line.split(" ")
-                word = word_split[0]
-                #if word not in g2p_dict:
-                g2p_dict[word] = []
-                g2p_dict[word].append(word_split[1:])
+                word = word_split[0].lower()
+                # custom pronunciations directly override the dictionary
+                g2p_dict[word] = [word_split[1:]]
 
             line_index = line_index + 1
             line = f.readline()
-
+
     return g2p_dict
 
 
@@ -192,43 +196,167 @@ def get_dict():
         g2p_dict = read_dict_new()
         cache_dict(g2p_dict, CACHE_PATH)
 
+    g2p_dict = hot_reload_hot(g2p_dict)
+
     return g2p_dict
 
 
-eng_dict = get_dict()
+def get_namedict():
+    if os.path.exists(NAMECACHE_PATH):
+        with open(NAMECACHE_PATH, "rb") as pickle_file:
+            name_dict = pickle.load(pickle_file)
+    else:
+        name_dict = {}
+
+    return name_dict
 
 
 def text_normalize(text):
     # todo: eng text normalize
-    return text.replace(";", ",")
+    # adapt Chinese punctuation and punctuation g2p_en expects
+    rep_map = {
+        "[;::,;]": ",",
+        '["’]': "'",
+        "。": ".",
+        "!": "!",
+        "?": "?",
+    }
+    for p, r in rep_map.items():
+        text = re.sub(p, r, text)
+
+    # text formatting taken from g2p_en,
+    # with uppercase compatibility added
+    text = unicode(text)
+    text = normalize_numbers(text)
+    text = ''.join(char for char in unicodedata.normalize('NFD', text)
+                   if unicodedata.category(char) != 'Mn')  # Strip accents
+    text = re.sub("[^ A-Za-z'.,?!\-]", "", text)
+    text = re.sub(r"(?i)i\.e\.", "that is", text)
+    text = re.sub(r"(?i)e\.g\.", "for example", text)
+
+    return text
+
+
+class en_G2p(G2p):
+    def __init__(self):
+        super().__init__()
+        # initialize the word segmenter
+        wordsegment.load()
+
+        # replace the outdated built-in dictionary; add the name dictionary
+        self.cmu = get_dict()
+        self.namedict = get_namedict()
+
+        # remove a few abbreviations whose dictionary pronunciations are wrong
+        for word in ["AE", "AI", "AR", "IOS", "HUD", "OS"]:
+            del self.cmu[word.lower()]
+
+        # fix homographs
+        self.homograph2features["read"] = (['R', 'IY1', 'D'], ['R', 'EH1', 'D'], 'VBP')
+        self.homograph2features["complex"] = (['K', 'AH0', 'M', 'P', 'L', 'EH1', 'K', 'S'], ['K', 'AA1', 'M', 'P', 'L', 'EH0', 'K', 'S'], 'JJ')
+
+
+    def __call__(self, text):
+        # tokenization
+        words = word_tokenize(text)
+        tokens = pos_tag(words)  # tuples of (word, tag)
+
+        # steps
+        prons = []
+        for o_word, pos in tokens:
+            # restore g2p_en's lowercasing logic
+            word = o_word.lower()
+
+            if re.search("[a-z]", word) is None:
+                pron = [word]
+            # handle single letters first
+            elif len(word) == 1:
+                # fix the pronunciation of a standalone "A"; the original-case o_word is needed to detect uppercase
+                if o_word == "A":
+                    pron = ['EY1']
+                else:
+                    pron = self.cmu[word][0]
+            # original g2p_en homograph handling
+            elif word in self.homograph2features:  # Check homograph
+                pron1, pron2, pos1 = self.homograph2features[word]
+                if pos.startswith(pos1):
+                    pron = pron1
+                # pos1 longer than pos only happens for "read"
+                elif len(pos) < len(pos1) and pos == pos1[:len(pos)]:
+                    pron = pron1
+                else:
+                    pron = pron2
+            else:
+                # recursive lookup / prediction
+                pron = self.qryword(o_word)
 
+            prons.extend(pron)
+            prons.extend([" "])
 
-def g2p(text):
-    phones = []
-    words = re.split(r"([,;.\-\?\!\s+])", text)
-    for w in words:
-        if w.upper() in eng_dict:
-            phns = eng_dict[w.upper()]
-            for ph in phns:
-                phones += ph
-        else:
-            phone_list = list(filter(lambda p: p != " ", _g2p(w)))
-            for ph in phone_list:
-                if ph in arpa:
-                    phones.append(ph)
-                else:
-                    phones.append(ph)
+        return prons[:-1]
+
+
+    def qryword(self, o_word):
+        word = o_word.lower()
+
+        # dictionary lookup, except for single letters
+        if len(word) > 1 and word in self.cmu:  # lookup CMU dict
+            return self.cmu[word][0]
+
+        # when only the first letter is uppercase, try the name dictionary
+        if o_word.istitle() and word in self.namedict:
+            return self.namedict[word][0]
+
+        # OOV words of length <= 3 are spelled out letter by letter
+        if len(word) <= 3:
+            phones = []
+            for w in word:
+                # fix the standalone "a" pronunciation; no uppercase case here
+                if w == "a":
+                    phones.extend(['EY1'])
+                else:
+                    phones.extend(self.cmu[w][0])
+            return phones
+
+        # try to split off a possessive 's
+        if re.match(r"^([a-z]+)('s)$", word):
+            phones = self.qryword(word[:-2])
+            # after the voiceless consonants P T K F TH HH, 's is ['S']
+            if phones[-1] in ['P', 'T', 'K', 'F', 'TH', 'HH']:
+                phones.extend(['S'])
+            # after the sibilants S Z SH ZH CH JH, 's is ['IH1', 'Z'] or ['AH0', 'Z']
+            elif phones[-1] in ['S', 'Z', 'SH', 'ZH', 'CH', 'JH']:
+                phones.extend(['AH0', 'Z'])
+            # after the voiced consonants B D G DH V M N NG L R W Y, 's is ['Z']
+            # AH0 AH1 AH2 EY0 EY1 EY2 AE0 AE1 AE2 EH0 EH1 EH2 OW0 OW1 OW2 UH0 UH1 UH2 IY0 IY1 IY2 AA0 AA1 AA2 AO0 AO1 AO2
+            # ER ER0 ER1 ER2 UW0 UW1 UW2 AY0 AY1 AY2 AW0 AW1 AW2 OY0 OY1 OY2 IH IH0 IH1 IH2: after vowels, 's is also ['Z']
+            else:
+                phones.extend(['Z'])
+            return phones
+
+        # try word segmentation to handle compound words
+        comps = wordsegment.segment(word.lower())
+
+        # if it cannot be segmented, send it back for model prediction
+        if len(comps)==1:
+            return self.predict(word)
+
+        # if it can be segmented, recurse on each component
+        return [phone for comp in comps for phone in self.qryword(comp)]
+
+
+_g2p = en_G2p()
+
+
+def g2p(text):
+    # run g2p_en over the whole passage; drop returned symbols that are not valid ARPA
+    phone_list = _g2p(text)
+    phones = [ph if ph != "<unk>" else "UNK" for ph in phone_list if ph not in [" ", "<pad>", "UW", "</s>", "<s>"]]
 
     return replace_phs(phones)
 
 
 if __name__ == "__main__":
-    # print(get_dict())
     print(g2p("hello"))
-    print(g2p("In this; paper, we propose 1 DSPGAN, a GAN-based universal vocoder."))
-    # all_phones = set()
-    # for k, syllables in eng_dict.items():
-    #     for group in syllables:
-    #         for ph in group:
-    #             all_phones.add(ph)
-    # print(all_phones)
+    print(g2p(text_normalize("e.g. I used openai's AI tool to draw a picture.")))
+    print(g2p(text_normalize("In this; paper, we propose 1 DSPGAN, a GAN-based universal vocoder.")))
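Note: the possessive branch of the new qryword() chooses the 's allophone from the voicing of the stem's final phoneme. A standalone re-statement of just that rule; the stem phone lists below are hand-written illustrations, not dictionary output:

def possessive(stem_phones):
    if stem_phones[-1] in ['P', 'T', 'K', 'F', 'TH', 'HH']:    # voiceless -> S
        return stem_phones + ['S']
    if stem_phones[-1] in ['S', 'Z', 'SH', 'ZH', 'CH', 'JH']:  # sibilant -> AH0 Z
        return stem_phones + ['AH0', 'Z']
    return stem_phones + ['Z']                                 # voiced consonant or vowel -> Z

print(possessive(['K', 'AE1', 'T']))   # cat's  -> K AE1 T S
print(possessive(['D', 'AO1', 'G']))   # dog's  -> D AO1 G Z
print(possessive(['R', 'OW1', 'Z']))   # rose's -> R OW1 Z AH0 Z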
output_audio.wav → text/namedict_cache.pickle RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6b97115a0643627c7837d43c7a06496cff5cb85f72f2b540d282ded656808700
-size 311084
+oid sha256:559552094c4a6e995213e3fa586330e078ef8cb3a7a95a3109e945111cd2bfc1
+size 760663
text/tone_sandhi.py CHANGED
@@ -672,6 +672,7 @@ class ToneSandhi:
                 and i + 1 < len(seg)
                 and seg[i - 1][0] == seg[i + 1][0]
                 and seg[i - 1][1] == "v"
+                and seg[i + 1][1] == "v"
             ):
                 new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0]
             else:
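Note: this one-line change tightens the "X一X" merge so that 一 is folded into its duplicated neighbours only when the word on both sides is tagged as a verb (e.g. 看一看 "take a look"). A simplified sketch of the rule on (word, pos) pairs, not the full _merge_yi method:

def merge_yi(seg):
    # seg: list of [word, pos] pairs, e.g. from jieba.posseg
    new_seg = []
    for i, (word, pos) in enumerate(seg):
        if (0 < i < len(seg) - 1 and word == "一"
                and seg[i - 1][0] == seg[i + 1][0]
                and seg[i - 1][1] == "v" and seg[i + 1][1] == "v"):
            # fold 一 into the preceding copy of the verb: 看 + 一 + 看 -> 看一看
            new_seg[-1][0] = new_seg[-1][0] + "一" + new_seg[-1][0]
        elif (i >= 2 and seg[i - 1][0] == "一" and seg[i - 2][0] == word
              and pos == "v"):
            continue  # trailing copy was already merged above
        else:
            new_seg.append([word, pos])
    return new_seg

print(merge_yi([["看", "v"], ["一", "m"], ["看", "v"]]))  # [['看一看', 'v']]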
text/zh_normalization/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/text/zh_normalization/__pycache__/__init__.cpython-310.pyc and b/text/zh_normalization/__pycache__/__init__.cpython-310.pyc differ
 
text/zh_normalization/__pycache__/char_convert.cpython-310.pyc CHANGED
Binary files a/text/zh_normalization/__pycache__/char_convert.cpython-310.pyc and b/text/zh_normalization/__pycache__/char_convert.cpython-310.pyc differ