simplified pipeline
Browse files- buckeye_asr.py +56 -27
buckeye_asr.py
CHANGED
@@ -61,6 +61,8 @@ class BuckeyeASRDataset(datasets.GeneratorBasedBuilder):
|
|
61 |
"syntactic_class": datasets.Value("string"),
|
62 |
}
|
63 |
),
|
|
|
|
|
64 |
"speaker_id": datasets.Value("string"),
|
65 |
"id": datasets.Value("string"),
|
66 |
}
|
@@ -124,19 +126,30 @@ class BuckeyeASRDataset(datasets.GeneratorBasedBuilder):
|
|
124 |
fpath = wav_path.with_suffix(".phones")
|
125 |
phonelist = self._extract_phone_info(fpath)
|
126 |
phone_seqs = self._split_phones(word_seqs, phonelist, fpath)
|
127 |
-
assert len(phone_seqs) == len(
|
128 |
|
129 |
# id_ must be a unique key
|
130 |
for idx in range(len(transcripts)):
|
|
|
131 |
id_ = f"{wav_path.stem}_{idx}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
132 |
example = {
|
133 |
"file": wav_path,
|
134 |
"audio": wav_path,
|
135 |
"text": transcripts[idx],
|
136 |
"phonetic_detail": phone_seqs[idx],
|
137 |
-
"word_detail":
|
138 |
"speaker_id": id_[1:3],
|
139 |
"id": id_,
|
|
|
|
|
140 |
}
|
141 |
yield id_, example
|
142 |
|
@@ -156,7 +169,7 @@ class BuckeyeASRDataset(datasets.GeneratorBasedBuilder):
|
|
156 |
subfields = fields[0].split()
|
157 |
label = subfields[2]
|
158 |
stop = float(subfields[0])
|
159 |
-
if label[0] in
|
160 |
# Handling tags (tags sometime miss transcriptions)
|
161 |
wordlist.append({
|
162 |
"start": start,
|
@@ -194,15 +207,21 @@ class BuckeyeASRDataset(datasets.GeneratorBasedBuilder):
|
|
194 |
if match and match.start() == 0 and segment:
|
195 |
# The model can't handle segments shorter than 25 ms
|
196 |
if segment[-1]["stop"] - segment[0]["start"] >= 0.025:
|
|
|
|
|
197 |
logging.warning(
|
198 |
f"Sequence shorter than 25 ms in {fpath} starting "
|
199 |
f"at {segment[0]['start']}")
|
200 |
-
word_seqs.append(segment)
|
201 |
segment = []
|
202 |
else:
|
203 |
segment.append(w)
|
204 |
-
if segment
|
205 |
-
|
|
|
|
|
|
|
|
|
|
|
206 |
return word_seqs
|
207 |
|
208 |
def _extract_phone_info(cls, fpath):
|
@@ -243,36 +262,46 @@ class BuckeyeASRDataset(datasets.GeneratorBasedBuilder):
|
|
243 |
# alignments with no duration.
|
244 |
# * if two consecutive phones are the same in the narrow transcription,
|
245 |
# they can be merged in the alignments.
|
246 |
-
# We are conservative here and every phone that completely included
|
247 |
-
#
|
248 |
-
# phone of the narrow transcription.
|
249 |
def _split_phones(cls, word_seqs, phonelist, fpath):
|
250 |
phone_seqs = []
|
251 |
i_pl = 0
|
252 |
for wl in word_seqs:
|
253 |
segment = []
|
254 |
-
|
255 |
-
|
256 |
-
|
257 |
-
|
258 |
-
|
259 |
-
|
260 |
-
i_pl
|
261 |
-
|
262 |
-
#
|
263 |
-
|
264 |
-
|
265 |
-
|
266 |
-
|
267 |
-
|
268 |
-
|
269 |
-
|
270 |
-
|
271 |
-
|
|
|
|
|
|
|
|
|
|
|
272 |
phone_seqs.append(segment)
|
273 |
return phone_seqs
|
274 |
|
275 |
|
|
|
|
|
|
|
|
|
|
|
276 |
def matching(phone, word):
|
277 |
# Reject phones without label
|
278 |
if phone["label"] is None:
|
|
|
61 |
"syntactic_class": datasets.Value("string"),
|
62 |
}
|
63 |
),
|
64 |
+
"start": datasets.Value("float"),
|
65 |
+
"stop": datasets.Value("float"),
|
66 |
"speaker_id": datasets.Value("string"),
|
67 |
"id": datasets.Value("string"),
|
68 |
}
|
|
|
126 |
fpath = wav_path.with_suffix(".phones")
|
127 |
phonelist = self._extract_phone_info(fpath)
|
128 |
phone_seqs = self._split_phones(word_seqs, phonelist, fpath)
|
129 |
+
assert len(phone_seqs) == len(word_seqs)
|
130 |
|
131 |
# id_ must be a unique key
|
132 |
for idx in range(len(transcripts)):
|
133 |
+
ws = word_seqs[idx]
|
134 |
id_ = f"{wav_path.stem}_{idx}"
|
135 |
+
if phone_seqs[idx]:
|
136 |
+
start = min(ws[0]["start"], phone_seqs[idx][0]["start"])
|
137 |
+
stop = max(ws[-1]["stop"], phone_seqs[idx][-1]["stop"])
|
138 |
+
else:
|
139 |
+
start = ws[0]["start"]
|
140 |
+
stop = ws[-1]["stop"]
|
141 |
+
shift_timestamps(ws, -start)
|
142 |
+
shift_timestamps(phone_seqs[idx], -start)
|
143 |
example = {
|
144 |
"file": wav_path,
|
145 |
"audio": wav_path,
|
146 |
"text": transcripts[idx],
|
147 |
"phonetic_detail": phone_seqs[idx],
|
148 |
+
"word_detail": ws,
|
149 |
"speaker_id": id_[1:3],
|
150 |
"id": id_,
|
151 |
+
"start": start,
|
152 |
+
"stop": stop,
|
153 |
}
|
154 |
yield id_, example
|
155 |
|
|
|
169 |
subfields = fields[0].split()
|
170 |
label = subfields[2]
|
171 |
stop = float(subfields[0])
|
172 |
+
if label[0] in ['<', '{']:
|
173 |
# Handling tags (tags sometime miss transcriptions)
|
174 |
wordlist.append({
|
175 |
"start": start,
|
|
|
207 |
if match and match.start() == 0 and segment:
|
208 |
# The model can't handle segments shorter than 25 ms
|
209 |
if segment[-1]["stop"] - segment[0]["start"] >= 0.025:
|
210 |
+
word_seqs.append(segment)
|
211 |
+
else:
|
212 |
logging.warning(
|
213 |
f"Sequence shorter than 25 ms in {fpath} starting "
|
214 |
f"at {segment[0]['start']}")
|
|
|
215 |
segment = []
|
216 |
else:
|
217 |
segment.append(w)
|
218 |
+
if segment:
|
219 |
+
if segment[-1]["stop"] - segment[0]["start"] >= 0.025:
|
220 |
+
word_seqs.append(segment)
|
221 |
+
else:
|
222 |
+
logging.warning(
|
223 |
+
f"Sequence shorter than 25 ms in {fpath} starting "
|
224 |
+
f"at {segment[0]['start']}")
|
225 |
return word_seqs
|
226 |
|
227 |
def _extract_phone_info(cls, fpath):
|
|
|
262 |
# alignments with no duration.
|
263 |
# * if two consecutive phones are the same in the narrow transcription,
|
264 |
# they can be merged in the alignments.
|
265 |
+
# We are conservative here and keep every phone that is completely included
|
266 |
+
# in the utterance + overlapping starting/ending phones if they match the
|
267 |
+
# first/last phone of the narrow transcription.
|
268 |
def _split_phones(cls, word_seqs, phonelist, fpath):
|
269 |
phone_seqs = []
|
270 |
i_pl = 0
|
271 |
for wl in word_seqs:
|
272 |
segment = []
|
273 |
+
# skip phones that precede the first word's start
|
274 |
+
while phonelist[i_pl]["stop"] <= wl[0]["start"]:
|
275 |
+
i_pl += 1
|
276 |
+
# a phone which overlaps with the start of the first word might
|
277 |
+
# not belong with the utterance
|
278 |
+
if (phonelist[i_pl]["label"] and
|
279 |
+
phonelist[i_pl]["start"] < wl[0]["start"] and
|
280 |
+
not wl[0]["narrow_transcription"].startswith(phonelist[i_pl]["label"])):
|
281 |
+
# skip the phone
|
282 |
+
i_pl += 1
|
283 |
+
# take all phones fully included in the utterance
|
284 |
+
while (i_pl < len(phonelist) and
|
285 |
+
phonelist[i_pl]["stop"] < wl[-1]["stop"]):
|
286 |
+
segment.append(phonelist[i_pl])
|
287 |
+
i_pl += 1
|
288 |
+
# a phone which overlaps with the end of the last word might
|
289 |
+
# belong with the utterance
|
290 |
+
if (phonelist[i_pl]["label"] and
|
291 |
+
phonelist[i_pl]["start"] < wl[-1]["stop"] and
|
292 |
+
wl[-1]["narrow_transcription"].endswith(phonelist[i_pl]["label"])):
|
293 |
+
# add the phone
|
294 |
+
segment.append(phonelist[i_pl])
|
295 |
+
i_pl += 1
|
296 |
phone_seqs.append(segment)
|
297 |
return phone_seqs
|
298 |
|
299 |
|
300 |
+
def shift_timestamps(detail, shift):
    """Offset every "start" and "stop" timestamp in *detail* by *shift*.

    Mutates the dicts of *detail* in place; returns None.
    """
    for entry in detail:
        for key in ("start", "stop"):
            entry[key] += shift
|
304 |
+
|
305 |
def matching(phone, word):
|
306 |
# Reject phones without label
|
307 |
if phone["label"] is None:
|