bhigy committed
Commit 55bda3f
1 Parent(s): 6b3b785

bugfixes + added some checks on the data

Files changed (1)
  1. buckeye_asr.py +45 -26
buckeye_asr.py CHANGED
@@ -43,7 +43,6 @@ class BuckeyeASRDataset(datasets.GeneratorBasedBuilder):
                 {
                     "file": datasets.Value("string"),
                     "audio": datasets.Value("string"),
-                    #"audio": datasets.Audio(sampling_rate=16_000),
                     "text": datasets.Value("string"),
                     "phonetic_detail": datasets.Sequence(
                         {
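For context, each example yielded by the builder follows the features schema above. A record might look roughly like the sketch below; all values are invented for illustration, and any keys not visible in the diff (such as the contents of phonetic_detail) are assumptions:

example = {
    "file": "s1901b_1.wav",                        # hypothetical segment name
    "audio": "/path/to/buckeye/s1901b_1.wav",      # audio kept as a path string here
    "text": "okay so i grew up on the west side",  # invented transcript
    "phonetic_detail": {                           # assumed per-phone timing fields
        "start": [0.00, 0.12],
        "stop": [0.12, 0.31],
        "label": ["ow", "k"],
    },
}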
@@ -110,16 +109,10 @@ class BuckeyeASRDataset(datasets.GeneratorBasedBuilder):
         """ Yields examples as (key, example) tuples. """
         for p in paths:
             for wav_path in Path(p).glob("*.wav"):
-                # TODO: when to load audio?
-                # Extract audio
-                #with open(wav_path) as f:
-                #    audio_data = f.read() # read audio file properly
-                #    pass
-
                 # Extract words
                 fpath = wav_path.with_suffix(".words")
                 wordlist = self._extract_word_info(fpath)
-                word_seqs = self._split_words(wordlist)
+                word_seqs = self._split_words(wordlist, fpath)

                 # Extract transcript
                 # To avoid conflict between the transcripts (`.txt` files) and
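As a side note, the generator pairs each recording with its annotation purely by swapping the file suffix. A minimal sketch of that pairing (the paths are hypothetical):

from pathlib import Path

wav_path = Path("/data/buckeye/s19/s1901b.wav")
words_path = wav_path.with_suffix(".words")
print(words_path)  # /data/buckeye/s19/s1901b.words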
@@ -153,27 +146,44 @@ class BuckeyeASRDataset(datasets.GeneratorBasedBuilder):
         start = 0
         wordlist = []
         for line in lines[9:]:
+            # File s1901b.words goes beyond the end of the audio
+            if fpath.name == "s1901b.words" and start > 568.739:
+                break
             line = line.rstrip("\n")
-            fields = line.split("; ")
-            if len(fields) < 4:
-                logging.warning(f"Line \"{line}\" missing fields in file {fpath}")
+            if not line:  # Skipping empty lines
                 continue
+            fields = line.split("; ")
             subfields = fields[0].split()
-            if not fields[2]:
-                logging.warning(f"Narrow transcription is empty in {fpath}")
-            narrow_trn = fields[2]
-            wordlist.append({
-                "start": start,
-                "stop": float(subfields[0]),
-                "label": subfields[2],
-                "broad_transcription": fields[1],
-                "narrow_transcription": narrow_trn,
-                "syntactic_class": fields[3],
-            })
-            start = float(subfields[0])
+            label = subfields[2]
+            stop = float(subfields[0])
+            if label[0] in ['<', '{']:
+                # Handling tags (tags sometimes miss transcriptions)
+                wordlist.append({
+                    "start": start,
+                    "stop": stop,
+                    "label": label,
+                })
+            else:
+                # Handling words
+                if len(fields) < 4:
+                    logging.warning(f"Line \"{line}\" missing fields in file {fpath}")
+                else:
+                    narrow_trn = fields[2]
+                    # Warning if the narrow_transcription is empty
+                    if not narrow_trn:
+                        logging.warning(f"Narrow transcription is empty in {fpath}")
+                    wordlist.append({
+                        "start": start,
+                        "stop": stop,
+                        "label": label,
+                        "broad_transcription": fields[1],
+                        "narrow_transcription": narrow_trn,
+                        "syntactic_class": fields[3],
+                    })
+            start = stop
         return wordlist

-    def _split_words(cls, wordlist):
+    def _split_words(cls, wordlist, fpath):
         word_seqs = []
         segment = []
         for w in wordlist:
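To make the new parsing branch concrete, here is how one annotation line would be split under the logic above. The line itself is invented, but it follows the "stop-time code label; broad; narrow; class" layout the code assumes:

line = "9.778748 122 okay; ow k ey; k ay; NN"   # invented example line
fields = line.split("; ")        # ['9.778748 122 okay', 'ow k ey', 'k ay', 'NN']
subfields = fields[0].split()    # ['9.778748', '122', 'okay']
stop = float(subfields[0])       # end time of the token, in seconds
label = subfields[2]             # 'okay' -> word branch; a label like '<SIL>' would hit the tag branch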
@@ -182,11 +192,16 @@ class BuckeyeASRDataset(datasets.GeneratorBasedBuilder):
             regexp = "|".join([f"<{st}" for st in cls.SPLIT_TAGS])
             match = re.match(regexp, w["label"])
             if match and match.start() == 0 and segment:
-                word_seqs.append(segment)
+                # The model can't handle segments shorter than 25 ms
+                if segment[-1]["stop"] - segment[0]["start"] >= 0.025:
+                    word_seqs.append(segment)
+                else:
+                    logging.warning(f"Sequence shorter than 25 ms in {fpath} "
+                                    f"starting at {segment[0]['start']}")
                 segment = []
             else:
                 segment.append(w)
-        if segment:
+        if segment and segment[-1]["stop"] - segment[0]["start"] >= 0.025:
             word_seqs.append(segment)
         return word_seqs
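A quick sanity check of the new length filter on a toy segment (timings invented):

segment = [{"start": 0.0, "stop": 0.01, "label": "uh"}]   # 10 ms long
duration = segment[-1]["stop"] - segment[0]["start"]
print(duration >= 0.025)   # False -> shorter than 25 ms, so the segment is skipped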
 
@@ -279,5 +294,9 @@ def matching(phone, word):


 def included(phone, word, threshold=0.02):
+    # We accept an overlap with a time difference up to the threshold at the
+    # start or the end
     return (phone["start"] >= word["start"] - threshold and
+            phone["start"] < word["stop"] and
+            phone["stop"] > word["start"] and
             phone["stop"] <= word["stop"] + threshold)
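The two extra conditions reject phones that fall inside the tolerance window but do not actually overlap the word. Copying the updated predicate into a standalone sketch (timings invented):

def included(phone, word, threshold=0.02):
    return (phone["start"] >= word["start"] - threshold and
            phone["start"] < word["stop"] and
            phone["stop"] > word["start"] and
            phone["stop"] <= word["stop"] + threshold)

word = {"start": 1.00, "stop": 1.30}
print(included({"start": 1.01, "stop": 1.29}, word))  # True: phone lies inside the word
print(included({"start": 1.30, "stop": 1.31}, word))  # False: starts exactly where the word ends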
 