Datasets:
michaelnetbiz
committed on
Commit
•
95ed5d1
1
Parent(s):
7170b11
Add text normalization
Browse files- leviticus/__init__.py +125 -0
- requirements.txt +5 -0
- scripts/prep_push_to_hf.py +3 -0
leviticus/__init__.py
ADDED
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
text normalization functions
|
3 |
+
"""
|
4 |
+
|
5 |
+
from unidecode import unidecode
|
6 |
+
import inflect
|
7 |
+
import re
|
8 |
+
|
9 |
+
# Shared inflect engine used by all number-expansion helpers below.
_inflect = inflect.engine()
# Number with comma thousands separators, e.g. "1,234,567".
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
# Decimal number, e.g. "3.14".
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
# Pound-sterling amount, e.g. "£1,000".
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
# Dollar amount, e.g. "$10.50" (group 1 excludes the "$").
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
# Ordinal, e.g. "1st", "22nd", "3rd", "4th".
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
# Any bare run of digits; must be applied after the patterns above.
_number_re = re.compile(r'[0-9]+')

# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')

# List of (regular expression, replacement) pairs for abbreviations:
# each matches the abbreviation followed by a period, case-insensitively.
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
    ('mrs', 'misess'),
    ('mr', 'mister'),
    ('dr', 'doctor'),
    ('st', 'saint'),
    ('co', 'company'),
    ('jr', 'junior'),
    ('maj', 'major'),
    ('gen', 'general'),
    ('drs', 'doctors'),
    ('rev', 'reverend'),
    ('lt', 'lieutenant'),
    ('hon', 'honorable'),
    ('sgt', 'sergeant'),
    ('capt', 'captain'),
    ('esq', 'esquire'),
    ('ltd', 'limited'),
    ('col', 'colonel'),
    ('ft', 'fort'),
]]
|
41 |
+
|
42 |
+
|
43 |
+
def _remove_commas(m):
|
44 |
+
return m.group(1).replace(',', '')
|
45 |
+
|
46 |
+
|
47 |
+
def _expand_decimal_point(m):
|
48 |
+
return m.group(1).replace('.', ' point ')
|
49 |
+
|
50 |
+
|
51 |
+
def _expand_dollars(m):
|
52 |
+
match = m.group(1)
|
53 |
+
parts = match.split('.')
|
54 |
+
if len(parts) > 2:
|
55 |
+
return match + ' dollars' # Unexpected format
|
56 |
+
dollars = int(parts[0]) if parts[0] else 0
|
57 |
+
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
|
58 |
+
if dollars and cents:
|
59 |
+
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
|
60 |
+
cent_unit = 'cent' if cents == 1 else 'cents'
|
61 |
+
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
|
62 |
+
elif dollars:
|
63 |
+
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
|
64 |
+
return '%s %s' % (dollars, dollar_unit)
|
65 |
+
elif cents:
|
66 |
+
cent_unit = 'cent' if cents == 1 else 'cents'
|
67 |
+
return '%s %s' % (cents, cent_unit)
|
68 |
+
else:
|
69 |
+
return 'zero dollars'
|
70 |
+
|
71 |
+
|
72 |
+
def _expand_ordinal(m):
    """Expand a matched ordinal such as '2nd' into words ('second')."""
    ordinal = m.group(0)
    return _inflect.number_to_words(ordinal)
|
74 |
+
|
75 |
+
|
76 |
+
def _expand_number(m):
    """Expand a bare integer into words, reading 1001-2999 as years."""
    num = int(m.group(0))
    # Outside the year-like range: plain cardinal words.
    if not 1000 < num < 3000:
        return _inflect.number_to_words(num, andword='')
    if num == 2000:
        return 'two thousand'
    if 2000 < num < 2010:
        # 2001-2009 read as "two thousand one" ... "two thousand nine".
        return 'two thousand ' + _inflect.number_to_words(num % 100)
    if num % 100 == 0:
        # Even hundreds, e.g. 1900 -> "nineteen hundred".
        return _inflect.number_to_words(num // 100) + ' hundred'
    # Paired-digit year style, e.g. 1984 -> "nineteen eighty-four".
    words = _inflect.number_to_words(num, andword='', zero='oh', group=2)
    return words.replace(', ', ' ')
|
89 |
+
|
90 |
+
|
91 |
+
def normalize_numbers(text):
    """Rewrite numeric expressions in *text* as spelled-out words.

    Order matters: comma-grouped numbers and currency amounts must be
    consumed before the decimal, ordinal, and bare-digit passes run.
    """
    passes = (
        (_comma_number_re, _remove_commas),
        (_pounds_re, r'\1 pounds'),
        (_dollars_re, _expand_dollars),
        (_decimal_number_re, _expand_decimal_point),
        (_ordinal_re, _expand_ordinal),
        (_number_re, _expand_number),
    )
    for pattern, replacement in passes:
        text = re.sub(pattern, replacement, text)
    return text
|
99 |
+
|
100 |
+
|
101 |
+
def expand_abbreviations(text):
    """Replace abbreviations like 'mr.' with their spoken forms ('mister')."""
    for regex, replacement in _abbreviations:
        text = regex.sub(replacement, text)
    return text
|
105 |
+
|
106 |
+
|
107 |
+
def expand_numbers(text):
    """Spell out numeric expressions in *text*; delegates to normalize_numbers."""
    return normalize_numbers(text)
|
109 |
+
|
110 |
+
|
111 |
+
def collapse_whitespace(text):
    """Collapse each run of whitespace in *text* to a single space."""
    return re.sub(r'\s+', ' ', text)
|
113 |
+
|
114 |
+
|
115 |
+
def convert_to_ascii(text):
    """Transliterate *text* to its closest ASCII representation."""
    ascii_text = unidecode(text)
    return ascii_text
|
117 |
+
|
118 |
+
|
119 |
+
def normalize(text):
    """Run the full normalization pipeline over *text*.

    Steps, in order: transliterate to ASCII, lowercase, spell out
    numbers, expand abbreviations, collapse whitespace.
    """
    pipeline = (
        convert_to_ascii,
        str.lower,
        expand_numbers,
        expand_abbreviations,
        collapse_whitespace,
    )
    for step in pipeline:
        text = step(text)
    return text
|
requirements.txt
CHANGED
@@ -1,6 +1,7 @@
|
|
1 |
absl-py==2.0.0
|
2 |
aiohttp==3.8.5
|
3 |
aiosignal==1.3.1
|
|
|
4 |
appnope==0.1.3
|
5 |
asttokens==2.4.0
|
6 |
astunparse==1.6.3
|
@@ -36,6 +37,7 @@ h5py==3.9.0
|
|
36 |
huggingface-hub==0.17.3
|
37 |
idna==3.4
|
38 |
importlib-metadata==6.8.0
|
|
|
39 |
ipykernel==6.25.2
|
40 |
ipython==8.16.1
|
41 |
jedi==0.19.1
|
@@ -79,6 +81,8 @@ pyarrow==13.0.0
|
|
79 |
pyasn1==0.5.0
|
80 |
pyasn1-modules==0.3.0
|
81 |
pycparser==2.21
|
|
|
|
|
82 |
Pygments==2.16.1
|
83 |
python-dateutil==2.8.2
|
84 |
pytz==2023.3.post1
|
@@ -113,6 +117,7 @@ traitlets==5.11.2
|
|
113 |
transformers==4.34.0
|
114 |
typing_extensions==4.8.0
|
115 |
tzdata==2023.3
|
|
|
116 |
urllib3==1.26.17
|
117 |
wcwidth==0.2.8
|
118 |
Werkzeug==3.0.0
|
|
|
1 |
absl-py==2.0.0
|
2 |
aiohttp==3.8.5
|
3 |
aiosignal==1.3.1
|
4 |
+
annotated-types==0.6.0
|
5 |
appnope==0.1.3
|
6 |
asttokens==2.4.0
|
7 |
astunparse==1.6.3
|
|
|
37 |
huggingface-hub==0.17.3
|
38 |
idna==3.4
|
39 |
importlib-metadata==6.8.0
|
40 |
+
inflect==7.0.0
|
41 |
ipykernel==6.25.2
|
42 |
ipython==8.16.1
|
43 |
jedi==0.19.1
|
|
|
81 |
pyasn1==0.5.0
|
82 |
pyasn1-modules==0.3.0
|
83 |
pycparser==2.21
|
84 |
+
pydantic==2.4.2
|
85 |
+
pydantic_core==2.10.1
|
86 |
Pygments==2.16.1
|
87 |
python-dateutil==2.8.2
|
88 |
pytz==2023.3.post1
|
|
|
117 |
transformers==4.34.0
|
118 |
typing_extensions==4.8.0
|
119 |
tzdata==2023.3
|
120 |
+
Unidecode==1.3.7
|
121 |
urllib3==1.26.17
|
122 |
wcwidth==0.2.8
|
123 |
Werkzeug==3.0.0
|
scripts/prep_push_to_hf.py
CHANGED
@@ -7,6 +7,8 @@ import numpy as np
|
|
7 |
import pandas as pd
|
8 |
from datasets import Audio, Dataset, DatasetDict
|
9 |
|
|
|
|
|
10 |
MAX_DURATION_IN_SECONDS = 10.0
|
11 |
MIN_DURATION_IN_SECONDS = 1.0
|
12 |
MAX_LEN = 50
|
@@ -40,6 +42,7 @@ data = {
|
|
40 |
"audio": wavs,
|
41 |
"file": [basename(w) for w in wavs],
|
42 |
"text": metadata[1],
|
|
|
43 |
"duration": [librosa.get_duration(path=w) for w in wavs],
|
44 |
}
|
45 |
|
|
|
7 |
import pandas as pd
|
8 |
from datasets import Audio, Dataset, DatasetDict
|
9 |
|
10 |
+
from leviticus import normalize
|
11 |
+
|
12 |
MAX_DURATION_IN_SECONDS = 10.0
|
13 |
MIN_DURATION_IN_SECONDS = 1.0
|
14 |
MAX_LEN = 50
|
|
|
42 |
"audio": wavs,
|
43 |
"file": [basename(w) for w in wavs],
|
44 |
"text": metadata[1],
|
45 |
+
"norm": metadata[1].map(lambda x: normalize(x)),
|
46 |
"duration": [librosa.get_duration(path=w) for w in wavs],
|
47 |
}
|
48 |
|