jimregan committed on
Commit 1a9a056
1 Parent(s): df72c0c

update mix handling code

Files changed (1)
  1. waxholm.py +566 -51
waxholm.py CHANGED
@@ -1,6 +1,6 @@
 # coding=utf-8
 # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-# Copyright 2022 Jim O'Regan for Språkbanken Tal
+# Copyright 2022, 2023 Jim O'Regan for Språkbanken Tal
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,6 +20,9 @@
 from io import BytesIO
 import os
 import soundfile as sf
+from collections import namedtuple
+from copy import deepcopy
+from difflib import SequenceMatcher

 import datasets
 from datasets.tasks import AutomaticSpeechRecognition
@@ -54,6 +57,13 @@ _CITATION = """
 _URL = "http://www.speech.kth.se/waxholm/waxholm2.html"


+class FRExpected(Exception):
+    """Exception to raise when FR line was expected, but not read"""
+    def __init__(self, line):
+        msg = "Unknown line type (does not begin with 'FR'): "
+        super().__init__(msg + line)
+
+
 class WaxholmDataset(datasets.GeneratorBasedBuilder):
     """Dataset script for Waxholm."""

@@ -68,6 +78,7 @@ class WaxholmDataset(datasets.GeneratorBasedBuilder):
                 {
                     "id": datasets.Value("string"),
                     "text": datasets.Value("string"),
+                    "phonemes": datasets.Sequence(datasets.Value("string")),
                     "audio": datasets.Audio(sampling_rate=16_000)
                 }
             )
@@ -79,7 +90,7 @@ class WaxholmDataset(datasets.GeneratorBasedBuilder):
             homepage=_URL,
             citation=_CITATION,
             task_templates=[
-                AutomaticSpeechRecognition(audio_file_path_column="path", transcription_column="text")
+                AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")
             ],
         )

@@ -120,46 +131,81 @@ class WaxholmDataset(datasets.GeneratorBasedBuilder):
             buffer = BytesIO()
             sf.write(buffer, samples, sr, format="wav")
             blank = Audio()
-            audio_to_pass = blank.encode_example(value = {"bytes": buffer.getvalue(), "sampling_rate": sr, })
             yield line, {
                 "id": line,
                 "text": mix.text,
-                "audio": audio_to_pass
+                "phonemes": mix.get_phoneme_list(),
+                "audio": {
+                    "bytes": buffer.getvalue(),
+                    "sampling_rate": sr,
+                }
             }


 def fix_text(text: str) -> str:
-    replacements = text.maketrans("{}|\\", "äåöÖ")
+    replacements = text.maketrans("{}|\\[]", "äåöÖÄÅ")
     return text.translate(replacements)


+Label = namedtuple('Label', ['start', 'end', 'label'])
+
+
 class FR:
-    def __init__(self, text: str):
+    def __init__(self, text="", **kwargs): # C901
+        if text and text != "":
+            self.from_text(text)
+        else:
+            for arg in kwargs:
+                prms = ["pm", "pm_type", "type", "frame",
+                        "seconds", "phone", "phone_type",
+                        "word", "pseudoword"]
+                if arg in prms:
+                    self.__dict__[arg] = kwargs[arg]
+                else:
+                    print(f"Unrecognised argument: {arg}")
+
+    def from_text(self, text: str):
         if not text.startswith("FR"):
-            raise IOError("Unknown line type (does not begin with 'FR'): " + text)
+            raise FRExpected(text)
         parts = [a.strip() for a in text.split("\t")]
         self.frame = parts[0][2:].strip()
         if parts[-1].strip().endswith(" sec"):
             self.seconds = parts[-1].strip()[0:-4]
+        def split_phone(phone):
+            if phone.startswith("$#"):
+                phtype = 'I'
+                phone_type = fix_text(phone[0:2])
+                phone_out = fix_text(phone[2:])
+            elif phone.startswith("$") or phone.startswith("#"):
+                phtype = 'I'
+                phone_type = fix_text(phone[0:1])
+                phone_out = fix_text(phone[1:])
+            else:
+                print(phone)
+                return None
+            return {
+                "type": phtype,
+                "phone_type": phone_type,
+                "phone": phone_out
+            }
         for subpart in parts[1:-1]:
-            if subpart.startswith("$#"):
-                self.type = 'I'
-                self.phone_type = fix_text(subpart[0:2])
-                self.phone = fix_text(subpart[2:])
-            elif subpart.startswith("$"):
-                self.type = 'I'
-                self.phone_type = fix_text(subpart[0:2])
-                self.phone = fix_text(subpart[2:])
-            elif subpart.startswith("#"):
-                self.type = 'B'
-                self.phone_type = fix_text(subpart[0:2])
-                self.phone = fix_text(subpart[2:])
+            subpart = subpart.strip()
+            if subpart.startswith("$#") or subpart.startswith("$") or subpart.startswith("#"):
+                phparts = split_phone(subpart)
+                if phparts is not None:
+                    self.type = phparts['type']
+                    self.phone_type = phparts['phone_type']
+                    self.phone = phparts['phone']
             elif subpart.startswith(">pm "):
-                self.pm_type = fix_text(subpart[4:5])
-                self.pm = fix_text(subpart[5:])
+                phparts = split_phone(subpart[4:])
+                if phparts is not None:
+                    self.pm_type = phparts['phone_type']
+                    self.pm = phparts['phone']
             elif subpart.startswith(">pm. "):
-                self.pm_type = fix_text(subpart[4:5])
-                self.pm = fix_text(subpart[5:])
+                phparts = split_phone(subpart[5:])
+                if phparts is not None:
+                    self.pm_type = phparts['phone_type']
+                    self.pm = phparts['phone']
             elif subpart.startswith(">w "):
                 self.type = 'B'
                 self.word = fix_text(subpart[3:])
@@ -168,6 +214,10 @@ class FR:
                 self.type = 'B'
                 self.word = fix_text(subpart[4:])
                 self.pseudoword = False
+            elif subpart == "> XklickX" or subpart == "> XutandX":
+                self.type = 'B'
+                self.word = subpart[2:]
+                self.pseudoword = True
             elif subpart.startswith("X"):
                 if hasattr(self, 'type'):
                     print(self.type, self.type == 'B')
@@ -176,49 +226,514 @@ class FR:
                 self.pseudoword = True
             elif subpart == "OK":
                 self.type = 'E'
+            elif subpart == "PROBLEMS":
+                self.type = 'E'

+    def get_type(self):
+        if "type" in self.__dict__:
+            return self.type
+        else:
+            return ""

     def __repr__(self):
         parts = []
-        parts.append(f"type: {self.type}")
+        parts.append(f"type: {self.get_type()}")
         parts.append(f"frame: {self.frame}")
-        if self.type != 'E':
-            parts.append(f"phone: {self.phone}")
+        if self.get_type() != 'E':
+            parts.append(f"phone: {self.get_phone()}")
         if 'word' in self.__dict__:
             parts.append(f"word: {self.word}")
         if 'pm_type' in self.__dict__:
             parts.append(f"pm_type: {self.pm_type}")
         if 'pm' in self.__dict__:
             parts.append(f"pm: {self.pm}")
-        parts.append(f"sec: {self.seconds}")
-        return f"FR(" + ", ".join(parts) + ")"
+        if 'seconds' in self.__dict__:
+            parts.append(f"sec: {self.seconds}")
+        return "FR(" + ", ".join(parts) + ")"
+
+    def fix_type(self):
+        if self.is_type("B") and self.get_word() == "":
+            self.pm_type = "$"
+            self.phone_type = "$"
+            self.type = "I"
+
+    def get_phone(self, fix_accents=True):
+        def fix_accents(phone, fix_accents=True):
+            if not fix_accents:
+                return phone
+            return phone.replace("'", "ˈ").replace('"', "ˌ")
+        if 'pm' in self.__dict__:
+            return fix_accents(self.pm, fix_accents)
+        elif 'phone' in self.__dict__:
+            return fix_accents(self.phone, fix_accents)
+        else:
+            return None
+
+    def is_silence_word(self, noise=False):
+        if 'word' in self.__dict__:
+            if not noise:
+                return self.word == "XX"
+            else:
+                return self.word.startswith("X") and self.word.endswith("X")
+        else:
+            return False
+
+    def is_type(self, type):
+        if "type" in self.__dict__:
+            return type == self.type
+        else:
+            return False
+
+    def has_seconds(self):
+        return "seconds" in self.__dict__
+
+    def get_seconds(self):
+        if not self.has_seconds() and "frame" in self.__dict__:
+            return int(self.frame) / 16000.0
+        else:
+            return self.seconds
+
+    def get_word(self):
+        if self.has_word():
+            return self.word
+        else:
+            return ""
+
+    def has_word(self):
+        return "word" in self.__dict__
+
+    def has_pseudoword(self):
+        return "pseudoword" in self.__dict__
+
+
+def merge_frs(fr1, fr2, check_time=False):
+    """
+    Merge FRS entries for plosives: by default, the
+    period of glottal closure and the burst are separately
+    annotated.
+    """
+    if fr2.has_word():
+        return None
+    if check_time:
+        if fr1.get_seconds() != fr2.get_seconds():
+            return None
+    if _is_glottal_closure(fr1.get_phone(), fr2.get_phone()):
+        if not fr1.has_word():
+            return fr2
+        else:
+            word = None
+            if fr1.has_word():
+                word = fr1.word
+            pword = None
+            if fr1.has_pseudoword():
+                pword = fr1.pseudoword
+            return FR(pm=fr2.pm, pm_type=fr2.pm_type, type=fr2.type,
+                      frame=fr2.frame, seconds=fr2.seconds, phone=fr2.phone,
+                      phone_type=fr2.phone_type, word=word, pseudoword=pword)
+
+
+SILS = {
+    "K": "k",
+    "G": "g",
+    "T": "t",
+    "D": "d",
+    "2T": "2t",
+    "2D": "2d",
+    "P": "p",
+    "B": "b"
+}
+def _is_glottal_closure(cur, next):
+    return cur in SILS and next == SILS[cur]
+
+
+def _replace_glottal_closures(input):
+    input += ' '
+    for sil in SILS:
+        input = input.replace(f"{sil} {SILS[sil]} ", f"{SILS[sil]} ")
+    return input[:-1]
+
+def _fix_duration_markers(input):
+    input += ' '
+    input = input.replace(":+ ", ": ")
+    return input[:-1]


 class Mix():
-    def __init__(self, filepath: str):
+    def __init__(self, filepath: str, stringfile=None, fix_type=True):
         self.fr = []
-        with open(filepath) as inpf:
-            saw_text = False
-            saw_phoneme = False
-            saw_labels = False
-            for line in inpf.readlines():
-                if line.startswith("Waxholm dialog."):
-                    self.filepath = line[15:].strip()
-                if line.startswith("TEXT:"):
-                    saw_text = True
-                    continue
-                if saw_text:
-                    self.text = fix_text(line.strip())
-                    saw_text = False
-                if line.startswith("FR "):
-                    if saw_labels:
-                        saw_labels = False
-                    self.fr.append(FR(line))
-                if line.startswith("Labels: "):
-                    self.labels = line[8:].strip()
-                    saw_labels = True
-                if saw_labels and line.startswith(" "):
-                    self.labels += line.strip()
+        self.path = filepath
+        if stringfile is None:
+            with open(filepath) as inpf:
+                self.read_data(inpf.readlines())
+        else:
+            self.read_data(stringfile.split("\n"))
+        if fix_type:
+            for fr in self.fr:
+                fr.fix_type()
+
+    def read_data(self, inpf): # C901
+        """read data from text of a .mix file"""
+        saw_text = False
+        saw_phoneme = False
+        saw_labels = False
+        for line in inpf:
+            if line.startswith("Waxholm dialog."):
+                self.filepath = line[15:].strip()
+            if line.startswith("TEXT:"):
+                saw_text = True
+                continue
+            if saw_text:
+                self.text = fix_text(line.strip())
+                saw_text = False
+            if line.startswith("PHONEME:"):
+                saw_phoneme = True
+                self.phoneme = fix_text(line[8:].strip())
+                if line[8:].strip().endswith("."):
+                    saw_phoneme = False
+                continue
+            if saw_phoneme:
+                self.phoneme = fix_text(line.strip())
+                if line[8:].strip().endswith("."):
+                    saw_phoneme = False
+            if line.startswith("FR "):
+                if saw_labels:
+                    saw_labels = False
+                self.fr.append(FR(text=line))
+            if line.startswith("Labels: "):
+                self.labels = line[8:].strip()
+                saw_labels = True
+            if saw_labels and line.startswith(" "):
+                self.labels += line.strip()
+
+    def check_fr(self, verbose=False) -> bool:
+        """
+        Simple sanity check: that there were FR lines,
+        and that the first was a start type, and
+        last was an end type.
+        """
+        if 'fr' not in self.__dict__:
+            return False
+        if len(self.fr) == 0:
+            return False
+        start_end = self.fr[0].is_type("B") and self.fr[-1].is_type("E")
+        if verbose and not start_end:
+            if not self.fr[0].is_type("B"):
+                print(f"{self.path}: missing start type")
+            if not self.fr[-1].is_type("E"):
+                print(f"{self.path}: missing end type")
+        return start_end
+
+    def get_times(self, as_frames=False):
+        """
+        get the times of each phoneme
+        """
+        if not self.check_fr(verbose=True):
+            return []
+        if as_frames:
+            times = [int(x.frame) for x in self.fr]
+        else:
+            times = [float(x.seconds) for x in self.fr]
+        return times
+
+    def get_time_pairs(self, as_frames=False):
+        """
+        get a list of tuples containing start and end times
+        By default, the times are in seconds; if `as_frames`
+        is set, the number of frames are returned instead.
+        """
+        times = self.get_times(as_frames=as_frames)
+        starts = times[0:-1]
+        ends = times[1:]
+        return [x for x in zip(starts, ends)]
+
+    def prune_empty_presilences(self, verbose=False, include_noises=False):
+        """
+        Remove empty silence markers (i.e., those with no distinct duration)
+        """
+        self.orig_fr = deepcopy(self.fr)
+        i = 0
+        warned = False
+        def check_cur(cur, next):
+            if verbose and not cur.has_seconds():
+                print(f"Missing seconds: {self.path}\nLine: {cur}")
+            if verbose and not next.has_seconds():
+                print(f"Missing seconds: {self.path}\nLine: {next}")
+            return cur.get_seconds() == next.get_seconds() and cur.is_silence_word()
+        todel = []
+        while i < len(self.fr) - 1:
+            if check_cur(self.fr[i], self.fr[i + 1]):
+                if verbose:
+                    if not warned:
+                        warned = True
+                        print(f"Empty silence in {self.path}:")
+                    print(self.fr[i])
+                todel.append(i)
+            i += 1
+        if todel is not None and todel != []:
+            for chaff in todel.reverse():
+                del(self.fr[chaff])
+
+    def prune_empty_postsilences(self, verbose=False, include_noises=False):
+        """
+        Remove empty silence markers (i.e., those with no distinct duration)
+        """
+        if not "orig_fr" in self.__dict__:
+            self.orig_fr = deepcopy(self.fr)
+        i = 1
+        warned = False
+        def check_cur(cur, prev):
+            if verbose and not cur.has_seconds():
+                print(f"Missing seconds: {self.path}\nLine: {cur}")
+            if verbose and not prev.has_seconds():
+                print(f"Missing seconds: {self.path}\nLine: {prev}")
+            return cur.get_seconds() == prev.get_seconds() and cur.is_silence_word()
+        todel = []
+        while i < len(self.fr):
+            if check_cur(self.fr[i], self.fr[i - 1]):
+                if verbose:
+                    if not warned:
+                        warned = True
+                        print(f"Empty silence in {self.path}:")
+                    print(self.fr[i])
+                todel.append(i)
+            i += 1
+        if todel is not None and todel != []:
+            for chaff in todel.reverse():
+                del(self.fr[chaff])
+
+    def prune_empty_segments(self, verbose=False):
+        """
+        Remove empty segments (i.e., those with no distinct duration)
+        """
+        if not "orig_fr" in self.__dict__:
+            self.orig_fr = deepcopy(self.fr)
+        times = self.get_time_pairs(as_frames=True)
+        if len(times) != (len(self.fr) - 1):
+            print("Uh oh: time pairs and items don't match")
+        else:
+            keep = []
+            for fr in zip(self.fr[:-1], times):
+                cur_time = fr[1]
+                if cur_time[0] == cur_time[1]:
+                    if verbose:
+                        print(f"Empty segment {fr[0].get_phone()} ({cur_time[0]} --> {cur_time[1]})")
+                else:
+                    keep.append(fr[0])
+            keep.append(self.fr[-1])
+            self.fr = keep
+
+    def prune_empty_silences(self, verbose = False):
+        self.prune_empty_presilences(verbose)
+        self.prune_empty_postsilences(verbose)
+
+    def merge_plosives(self, verbose=False):
+        """
+        Merge plosives in FRs
+        (in Waxholm, as in TIMIT, the silence before the burst and the burst
+        are annotated separately).
+        """
+        if not "orig_fr" in self.__dict__:
+            self.orig_fr = deepcopy(self.fr)
+        tmp = []
+        i = 0
+        while i < len(self.fr)-1:
+            merged = merge_frs(self.fr[i], self.fr[i+1])
+            if merged is not None:
+                if verbose:
+                    print(f"Merging {self.fr[i]} and {self.fr[i+1]}")
+                i += 1
+                tmp.append(merged)
+            else:
+                tmp.append(self.fr[i])
+            i += 1
+        tmp.append(self.fr[-1])
+        self.fr = tmp
+
+    def get_phone_label_tuples(self, as_frames=False, fix_accents=True):
+        times = self.get_time_pairs(as_frames=as_frames)
+        if self.check_fr():
+            labels = [fr.get_phone(fix_accents) for fr in self.fr[0:-1]]
+        else:
+            labels = []
+        if len(times) == len(labels):
+            out = []
+            for z in zip(times, labels):
+                out.append((z[0][0], z[0][1], z[1]))
+            return out
+        else:
+            return []
+
+    def get_merged_plosives(self, noop=False, prune_empty=True):
+        """
+        Returns a list of phones with plosives merged
+        (in Waxholm, as in TIMIT, the silence before the burst and the burst
+        are annotated separately).
+        If `noop` is True, it simply returns the output of `prune_empty_labels()`
+        """
+        if noop:
+            if not prune_empty:
+                print("Warning: not valid to set noop to True and prune_empty to false")
+                print("Ignoring prune_empty")
+            return self.prune_empty_labels()
+        i = 0
+        out = []
+        if prune_empty:
+            labels = self.prune_empty_labels()
+        else:
+            labels = self.get_phone_label_tuples()
+        while i < len(labels)-1:
+            cur = labels[i]
+            next = labels[i+1]
+            if _is_glottal_closure(cur[2], next[2]):
+                tmp = Label(start = cur[0], end = next[1], label = next[2])
+                out.append(tmp)
+                i += 2
+            else:
+                tmp = Label(start = cur[0], end = cur[1], label = cur[2])
+                out.append(tmp)
+                i += 1
+        return out
+
+    def get_word_label_tuples(self, verbose=True):
+        times = self.get_time_pairs()
+        if len(times) == len(self.fr[0:-1]):
+            out = []
+            labels_raw = [x for x in zip(times, self.fr[0:-1])]
+            i = 0
+            cur = None
+            while i < len(labels_raw) - 1:
+                if labels_raw[i][1].is_type("B"):
+                    if cur is not None:
+                        out.append(cur)
+                    if labels_raw[i+1][1].is_type("B"):
+                        if verbose and labels_raw[i][1].get_word() == "":
+                            print("Expected word", labels_raw[i][1])
+                        out.append((labels_raw[i][0][0], labels_raw[i][0][1], labels_raw[i][1].get_word()))
+                        cur = None
+                        i += 1
+                        continue
+                    else:
+                        if verbose and labels_raw[i][1].get_word() == "":
+                            print("Expected word", labels_raw[i][1])
+                        cur = (labels_raw[i][0][0], labels_raw[i][0][1], labels_raw[i][1].get_word())
+                if labels_raw[i+1][1].is_type("B"):
+                    if cur is not None:
+                        cur = (cur[0], labels_raw[i][0][1], cur[2])
+                i += 1
+            out.append(cur)
+            return out
+        else:
+            return []
+
+    def get_dictionary(self, fix_accents=True):
+        """
+        Get pronunciation dictionary entries from the .mix file.
+        These entries are based on the corrected pronunciations; for
+        the lexical pronunciations, use the `phoneme` property.
+        """
+        output = {}
+        current_phones = []
+        prev_word = ''
+
+        for fr in self.fr:
+            if 'word' in fr.__dict__:
+                phone = fr.get_phone(fix_accents)
+                if prev_word != "":
+                    if prev_word not in output:
+                        output[prev_word] = []
+                    output[prev_word].append(current_phones.copy())
+                current_phones.clear()
+                prev_word = fr.word
+                current_phones.append(phone)
+            elif fr.is_type("I"):
+                phone = fr.get_phone(fix_accents)
+                current_phones.append(phone)
+            else:
+                if prev_word not in output:
+                    output[prev_word] = []
+                output[prev_word].append(current_phones.copy())
+        return output
+
+    def get_dictionary_list(self, fix_accents=True):
+        """
+        Get pronunciation dictionary entries from the .mix file.
+        These entries are based on the corrected pronunciations; for
+        the lexical pronunciations, use the `phoneme` property.
+        This version creates a list of tuples (word, phones) that
+        preserves the order of the entries.
+        """
+        output = []
+        current_phones = []
+        prev_word = ''
+
+        for fr in self.fr:
+            if 'word' in fr.__dict__:
+                phone = fr.get_phone(fix_accents)
+                if prev_word != "":
+                    output.append((prev_word, " ".join(current_phones)))
+                current_phones.clear()
+                prev_word = fr.word
+                current_phones.append(phone)
+            elif fr.is_type("I"):
+                phone = fr.get_phone(fix_accents)
+                current_phones.append(phone)
+            else:
+                output.append((prev_word, " ".join(current_phones)))
+        return output
+
+    def get_phoneme_string(self, insert_pauses=True, fix_accents=True):
+        """
+        Get an opinionated phoneme string
+
+        Args:
+            insert_pauses (bool, optional): Insert pauses between words. Defaults to True.
+            fix_accents (bool, optional): IPA-ify accents. Defaults to True.
+        """
+        dict_list = self.get_dictionary_list(fix_accents)
+        skip = ['p:', '.']
+        if insert_pauses:
+            phone_strings = [x[1] for x in dict_list if x[1] not in skip]
+            joined = ' p: '.join(phone_strings)
+        else:
+            phone_strings = [x[1] for x in dict_list if x[1] != "."]
+            joined = ' '.join(phone_strings)
+        joined = _replace_glottal_closures(joined)
+        joined = _fix_duration_markers(joined)
+        return joined
+
+    def get_phoneme_list(self, insert_pauses=True, fix_accents=True):
+        return self.get_phoneme_string(insert_pauses, fix_accents).split(' ')
+
+    def get_compare_dictionary(self, fix_accents=True, merge_plosives=True, only_changed=True):
+        """
+        Get pronunciation dictionary for comparision: i.e., where there is a difference
+        between the canonical pronunciation and what was spoken
+        """
+        if merge_plosives:
+            self.merge_plosives()
+        orig = self.get_dictionary_list(fix_accents)
+        self.prune_empty_segments()
+        new = self.get_dictionary_list(fix_accents)
+        if len(orig) != len(new):
+            words_orig = [w[0] for w in orig]
+            words_new = [w[0] for w in new]
+            skippables = []
+            for tag, i, j, _, _ in SequenceMatcher(None, words_orig, words_new).get_opcodes():
+                if tag in ('delete', 'replace'):
+                    skippables += [a for a in range(i, j)]
+            for c in skippables.reverse():
+                del(orig[c])
+        out = []
+        i = 0
+        while i < len(orig):
+            if orig[i][0] == new[i][0]:
+                if orig[i][1] == new[i][1]:
+                    if not only_changed:
+                        out.append(orig)
+                else:
+                    out.append((orig[i][0], orig[i][1], new[i][1]))
+            i += 1
+        return out


 def smp_probe(filename: str) -> bool:
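The string-level helpers in this commit (fix_text, the SILS table, _replace_glottal_closures, _fix_duration_markers) drive the new phoneme output. As a rough illustration only, and not part of the commit, the sketch below exercises them on invented sample strings; it assumes waxholm.py is importable as a module with its datasets and soundfile dependencies installed.

# Illustrative only: what the clean-up helpers added above do.
from waxholm import fix_text, _replace_glottal_closures, _fix_duration_markers

# fix_text() translates the {, }, |, \, [, ] bytes used in .mix files
# into ä, å, ö, Ö, Ä, Å.
print(fix_text("s{tta"))  # -> sätta

# Plosives are annotated as closure + burst ("K k" and so on);
# the closure symbol is folded into the burst.
print(_replace_glottal_closures("v a K k s h O l m"))  # -> v a k s h O l m

# ":+" duration markers are normalised to plain ":".
print(_fix_duration_markers("O:+ l"))  # -> O: l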
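For context only, here is a hedged sketch of how the reworked Mix class might be driven outside the dataset builder. The file path is a placeholder, and the sketch again assumes waxholm.py is importable; the attributes and methods used (text, get_phoneme_list, get_dictionary_list, get_phone_label_tuples) are the ones added or kept by this commit.

# Illustrative only: direct use of the Mix helper on one .mix transcription.
from waxholm import Mix

mix = Mix("path/to/utterance.mix")  # placeholder path to a Waxholm .mix file

print(mix.text)                   # orthographic transcription (from the TEXT: line)
print(mix.get_phoneme_list())     # phoneme sequence, with "p:" pauses between words
print(mix.get_dictionary_list())  # (word, phones) pairs in utterance order

# (start, end, label) tuples in seconds, one per phone segment.
for start, end, label in mix.get_phone_label_tuples():
    print(f"{start:.3f}\t{end:.3f}\t{label}")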