Upload 6 files
- text/LICENSE +19 -0
- text/__init__.py +66 -0
- text/__pycache__/__init__.cpython-38.pyc +0 -0
- text/__pycache__/symbols.cpython-38.pyc +0 -0
- text/cleaners.py +100 -0
- text/symbols.py +16 -0
text/LICENSE
ADDED
@@ -0,0 +1,19 @@
Copyright (c) 2017 Keith Ito

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
text/__init__.py
ADDED
@@ -0,0 +1,66 @@
import re
import sys
sys.path.append("../")
from viphoneme import syms, vi2IPA_split

symbols = syms

# Lookup tables between phoneme symbols and integer ids:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}


def sequence_to_text(sequence):
    '''Converts a sequence of symbol ids back to the corresponding string.'''
    result = ''
    for symbol_id in sequence:
        if symbol_id in _id_to_symbol:
            result += _id_to_symbol[symbol_id]
    return result


def text_to_sequence(text, cleaner_names):
    '''Converts Vietnamese text to a sequence of symbol ids via viphoneme.
    cleaner_names is accepted for API compatibility but is not used here.'''
    sequence = []
    # str.replace does not interpret regular expressions; use re.sub to
    # collapse runs of whitespace.
    text = re.sub(r'\s+', ' ', text).lower()
    phon = vi2IPA_split(text, "/")
    phon = phon.split("/")[1:]

    # Find the last real phoneme, drop trailing separator/silence tokens,
    # then close the sequence with a space and a final dot.
    eol = -1
    for i, p in reversed(list(enumerate(phon))):
        if p not in ["..", "", " ", ".", " "]:
            eol = i
            break
    phones = phon[:eol + 1] + [" ", "."]
    phones_id = []
    for p in phones:
        if p in _symbol_to_id:
            phones_id.append(_symbol_to_id[p])
    sequence.extend(phones_id)

    return sequence


def cleaned_text_to_sequence(cleaned_text):
    '''Converts already-phonemized, "/"-separated text to a sequence of symbol ids.'''
    sequence = []
    phon = cleaned_text.split("/")[1:]

    eol = -1
    for i, p in reversed(list(enumerate(phon))):
        if p not in ["..", "", " ", ".", " "]:
            eol = i
            break
    phones = phon[:eol + 1] + [" ", "."]
    phones_id = []
    for p in phones:
        if p in _symbol_to_id:
            phones_id.append(_symbol_to_id[p])
    sequence.extend(phones_id)

    return sequence


if __name__ == "__main__":
    text = "Nơi lưu trữ và cập nhật các bài viết, hình ảnh từ Tuấn Khanh"
    seq = text_to_sequence(text, "")
    print(seq)
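A minimal usage sketch of this module (assumptions: the viphoneme package is installed and the package is importable as text; the printed ids depend entirely on viphoneme's symbol inventory):

# Hypothetical usage; exact ids depend on viphoneme's symbol table.
from text import text_to_sequence, sequence_to_text

seq = text_to_sequence("xin chào", "")  # Vietnamese for "hello"
print(seq)                              # a list of integer symbol ids
print(sequence_to_text(seq))            # the IPA phoneme string those ids encode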
text/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (1.77 kB)
text/__pycache__/symbols.cpython-38.pyc
ADDED
Binary file (646 Bytes)
text/cleaners.py
ADDED
@@ -0,0 +1,100 @@
""" from https://github.com/keithito/tacotron """

'''
Cleaners are transformations that run over the input text at both training and eval time.

Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
  1. "english_cleaners" for English text
  2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
     the Unidecode library (https://pypi.python.org/pypi/Unidecode)
  3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
     the symbols in symbols.py to match your data).
'''

import re
from unidecode import unidecode
from phonemizer import phonemize


# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')

# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
    ('mrs', 'misess'),
    ('mr', 'mister'),
    ('dr', 'doctor'),
    ('st', 'saint'),
    ('co', 'company'),
    ('jr', 'junior'),
    ('maj', 'major'),
    ('gen', 'general'),
    ('drs', 'doctors'),
    ('rev', 'reverend'),
    ('lt', 'lieutenant'),
    ('hon', 'honorable'),
    ('sgt', 'sergeant'),
    ('capt', 'captain'),
    ('esq', 'esquire'),
    ('ltd', 'limited'),
    ('col', 'colonel'),
    ('ft', 'fort'),
]]


def expand_abbreviations(text):
    for regex, replacement in _abbreviations:
        text = re.sub(regex, replacement, text)
    return text


def expand_numbers(text):
    # Note: relies on a normalize_numbers helper (keithito's text/numbers.py),
    # which is not included in this upload.
    return normalize_numbers(text)


def lowercase(text):
    return text.lower()


def collapse_whitespace(text):
    return re.sub(_whitespace_re, ' ', text)


def convert_to_ascii(text):
    return unidecode(text)


def basic_cleaners(text):
    '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text


def transliteration_cleaners(text):
    '''Pipeline for non-English text that transliterates to ASCII.'''
    text = convert_to_ascii(text)
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text


def english_cleaners(text):
    '''Pipeline for English text, including abbreviation expansion.'''
    text = convert_to_ascii(text)
    text = lowercase(text)
    text = expand_abbreviations(text)
    phonemes = phonemize(text, language='en-us', backend='espeak', strip=True)
    phonemes = collapse_whitespace(phonemes)
    return phonemes


def english_cleaners2(text):
    '''Pipeline for English text, including abbreviation expansion. + punctuation + stress'''
    text = convert_to_ascii(text)
    text = lowercase(text)
    text = expand_abbreviations(text)
    phonemes = phonemize(text, language='en-us', backend='espeak', strip=True,
                         preserve_punctuation=True, with_stress=True)
    phonemes = collapse_whitespace(phonemes)
    return phonemes
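The module docstring above describes selecting cleaners by a comma-delimited list of names. A minimal dispatch sketch, modeled on the _clean_text helper from keithito's tacotron repo (an assumption: that helper is not part of this upload, and english_cleaners additionally requires phonemizer's espeak backend to be installed):

# Hypothetical dispatch helper, not part of this upload.
import text.cleaners as cleaners

def _clean_text(text, cleaner_names):
    # Look up each named cleaner in the module and apply them in order.
    for name in cleaner_names:
        cleaner = getattr(cleaners, name, None)
        if cleaner is None:
            raise Exception('Unknown cleaner: %s' % name)
        text = cleaner(text)
    return text

print(_clean_text('Dr. Smith  lives   here.', ['basic_cleaners']))
# -> 'dr. smith lives here.'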
text/symbols.py
ADDED
@@ -0,0 +1,16 @@
""" from https://github.com/keithito/tacotron """

'''
Defines the set of symbols used in text input to the model.
'''
_pad = '_'
_punctuation = ';:,.!?¡¿—…"«»“” '
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
_letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"


# Export all symbols:
symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa)

# Special symbol ids
SPACE_ID = symbols.index(" ")
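A quick sketch of how these exports are typically consumed (note: this Space's text/__init__.py builds its tables from viphoneme's syms instead, so this file may be vestigial here; the lookup below follows the keithito-style convention):

# Hypothetical consumer of symbols.py; mirrors the id tables in keithito-style repos.
from text.symbols import symbols, SPACE_ID

_symbol_to_id = {s: i for i, s in enumerate(symbols)}
print(SPACE_ID)            # id of the space character in the inventory
print(_symbol_to_id['_'])  # 0: the pad symbol is deliberately first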