# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
import logging
from pathlib import Path
import re
import datasets
from datasets.tasks import AutomaticSpeechRecognition
_CITATION = """\
@misc{pitt2007Buckeye,
title = {Buckeye {Corpus} of {Conversational} {Speech} (2nd release).},
url = {www.buckeyecorpus.osu.edu},
publisher = {Columbus, OH: Department of Psychology, Ohio State University (Distributor)},
author = {Pitt, M.A. and Dilley, L. and Johnson, K. and Kiesling, S. and Raymond, W. and Hume, E. and Fosler-Lussier, E.},
year = {2007},
}
"""
_DESCRIPTION = """\
The Buckeye Corpus of conversational speech contains high-quality recordings
from 40 speakers in Columbus OH conversing freely with an interviewer. The
speech has been orthographically transcribed and phonetically labeled.
"""
_HOMEPAGE = "https://buckeyecorpus.osu.edu"
_LICENSE = "FREE for noncommercial uses"
class BuckeyeASRDataset(datasets.GeneratorBasedBuilder):
"""Buckeye ASR dataset."""
VERSION = datasets.Version("1.0.0")
PHONE_EXCLUSION_LIST = ["{B_TRANS}", "<EXCLUDE-name>", "IVER", "NOISE", "SIL", "VOCNOISE"]
SPLIT_TAGS = ['SIL', 'IVER']
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"file": datasets.Value("string"),
"audio": datasets.Value("string"),
"text": datasets.Value("string"),
"phonetic_detail": datasets.Sequence(
{
"start": datasets.Value("float"),
"stop": datasets.Value("float"),
"label": datasets.Value("string"),
}
),
"word_detail": datasets.Sequence(
{
"start": datasets.Value("float"),
"stop": datasets.Value("float"),
"label": datasets.Value("string"),
"broad_transcription": datasets.Value("string"),
"narrow_transcription": datasets.Value("string"),
"syntactic_class": datasets.Value("string"),
}
),
"start": datasets.Value("float"),
"stop": datasets.Value("float"),
"speaker_id": datasets.Value("string"),
"id": datasets.Value("string"),
}
),
supervised_keys=("file", "text"),
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
task_templates=[AutomaticSpeechRecognition(
audio_file_path_column="file", transcription_column="text")],
)
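    # Minimal usage sketch (assumes the corpus was downloaded manually from
    # the homepage above and extracted locally; the paths below are
    # hypothetical):
    #
    #   import datasets
    #   ds = datasets.load_dataset("path/to/buckeye_asr.py",
    #                              data_dir="/data/buckeye")
    #   print(ds["train"][0]["text"])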
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
        # We use roughly 10% of the data each for the validation and test
        # sets, with no speaker overlap between the sets. We also balance
        # the held-out sets for speaker gender and age.
spkr_ids_dev = [18, 34, 36, 37]
spkr_ids_test = [20, 38, 39, 40]
def is_training(i):
return i not in spkr_ids_dev and i not in spkr_ids_test
spkr_ids_train = filter(is_training, range(1, 41))
        paths = {
            "train": [f"{dl_manager.manual_dir}/s{i:02d}" for i in spkr_ids_train],
            "dev": [f"{dl_manager.manual_dir}/s{i:02d}" for i in spkr_ids_dev],
            "test": [f"{dl_manager.manual_dir}/s{i:02d}" for i in spkr_ids_test],
        }
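        # e.g., for a hypothetical manual_dir of "/data/buckeye", paths["dev"]
        # is ["/data/buckeye/s18", "/data/buckeye/s34", "/data/buckeye/s36",
        # "/data/buckeye/s37"]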
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"paths": paths["train"]},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"paths": paths["test"]},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"paths": paths["dev"]},
),
]
    def _generate_examples(self, paths):
        """Yields examples as (key, example) tuples."""
for p in paths:
for wav_path in Path(p).glob("*.wav"):
# Extract words
fpath = wav_path.with_suffix(".words")
wordlist = self._extract_word_info(fpath)
word_seqs = self._split_words(wordlist, fpath)
# Extract transcript
# To avoid conflict between the transcripts (`.txt` files) and
# the word alignment files (`.words` files), we compute the
# transcripts from the latter.
transcripts = [" ".join([w["label"] for w in wseq]) for wseq in word_seqs]
# Extract phones
fpath = wav_path.with_suffix(".phones")
phonelist = self._extract_phone_info(fpath)
phone_seqs = self._split_phones(word_seqs, phonelist, fpath)
assert len(phone_seqs) == len(word_seqs)
# id_ must be a unique key
for idx in range(len(transcripts)):
ws = word_seqs[idx]
id_ = f"{wav_path.stem}_{idx}"
if phone_seqs[idx]:
start = min(ws[0]["start"], phone_seqs[idx][0]["start"])
stop = max(ws[-1]["stop"], phone_seqs[idx][-1]["stop"])
else:
start = ws[0]["start"]
stop = ws[-1]["stop"]
shift_timestamps(ws, -start)
shift_timestamps(phone_seqs[idx], -start)
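                    # id_ has the form "s0101a_0" (file stem + segment index);
                    # characters 1:3 of it give the speaker number, e.g. "01"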
example = {
"file": wav_path,
"audio": wav_path,
"text": transcripts[idx],
"phonetic_detail": phone_seqs[idx],
"word_detail": ws,
"speaker_id": id_[1:3],
"id": id_,
"start": start,
"stop": stop,
}
yield id_, example
    @classmethod
    def _extract_word_info(cls, fpath):
with open(fpath) as f:
lines = f.readlines()
start = 0
wordlist = []
for line in lines[9:]:
# File s1901b.words goes beyond the end of the audio
if fpath.name == "s1901b.words" and start > 568.739:
break
line = line.rstrip("\n")
if not line: # Skipping empty lines
continue
fields = line.split("; ")
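            # A word line has the shape
            #   <stop> <color> <label>; <broad>; <narrow>; <syntactic class>
            # e.g. (illustrative values, not verbatim corpus data):
            #   4.2831  121 okay; ow k ey; ow k ay; NN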
subfields = fields[0].split()
label = subfields[2]
stop = float(subfields[0])
if label[0] in ['<', '{']:
                # Handling tags (tags sometimes lack transcriptions)
wordlist.append({
"start": start,
"stop": stop,
"label": label,
})
else:
# Handling words
if len(fields) < 4:
logging.warning(f"Line \"{line}\" missing fields in file {fpath}")
else:
narrow_trn = fields[2]
# Warning if the narrow_transcription is empty
if not narrow_trn:
logging.warning(f"Narrow transcription is empty in {fpath}")
wordlist.append({
"start": start,
"stop": stop,
"label": label,
"broad_transcription": fields[1],
"narrow_transcription": narrow_trn,
"syntactic_class": fields[3],
})
start = stop
return wordlist
    @classmethod
    def _split_words(cls, wordlist, fpath):
word_seqs = []
segment = []
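        # e.g. a wordlist of [w1, <SIL>, w2, w3] yields the segments [w1] and
        # [w2, w3], provided each segment spans at least 25 ms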
for w in wordlist:
if w["label"][0] in ['<', '{']:
# Segmenting if the label starts with one of the SPLIT_TAGS
regexp = "|".join([f"<{st}" for st in cls.SPLIT_TAGS])
match = re.match(regexp, w["label"])
                if match and segment:  # re.match is anchored at position 0
                    # The model can't handle segments shorter than 25 ms
if segment[-1]["stop"] - segment[0]["start"] >= 0.025:
word_seqs.append(segment)
else:
logging.warning(
f"Sequence shorter than 25 ms in {fpath} starting "
f"at {segment[0]['start']}")
segment = []
else:
segment.append(w)
if segment:
if segment[-1]["stop"] - segment[0]["start"] >= 0.025:
word_seqs.append(segment)
else:
logging.warning(
f"Sequence shorter than 25 ms in {fpath} starting "
f"at {segment[0]['start']}")
return word_seqs
    @classmethod
    def _extract_phone_info(cls, fpath):
with open(fpath) as f:
lines = f.readlines()
start = 0
phonelist = []
        for line in lines[9:]:
if line == '\n':
# empty line -> skip
continue
# some lines contain "; *" after the last field
# we should get rid of that
line = line.split(";")[0].strip()
fields = line.split()
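            # A phone line has the shape "<stop> <color> <label>", e.g.
            # (illustrative values, not verbatim corpus data):
            #   0.5021  121 ow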
stop = float(fields[0])
if len(fields) < 3:
# phone without label
logging.warning(
f"Phone missing label in file {fpath} at time {stop}.")
phone = None
else:
phone = fields[2]
phonelist.append({
"start": start,
"stop": stop,
"label": phone,
})
start = stop
return phonelist
    # Matching phone and word alignments based on exact timestamps doesn't
    # work, as the timestamps don't always match.
    # Also, the narrow transcription can conflict with the phone alignments
    # in many ways, including:
    # * a wrong phone label in one of the two (e.g. `tq` instead of `t`);
    # * a phone absent from the narrow transcription appearing in the
    #   alignments with no duration;
    # * two consecutive identical phones in the narrow transcription being
    #   merged in the alignments.
    # We are conservative here and keep every phone that is completely
    # included in the utterance, plus overlapping starting/ending phones when
    # they match the first/last phone of the narrow transcription.
    @classmethod
    def _split_phones(cls, word_seqs, phonelist, fpath):
phone_seqs = []
i_pl = 0
for wl in word_seqs:
segment = []
            # skip phones that precede the first word's start
            while (i_pl < len(phonelist) and
                   phonelist[i_pl]["stop"] <= wl[0]["start"]):
                i_pl += 1
            # a phone which overlaps with the start of the first word might
            # not belong to the utterance
            if (i_pl < len(phonelist) and
                    phonelist[i_pl]["label"] and
                    phonelist[i_pl]["start"] < wl[0]["start"] and
                    not wl[0]["narrow_transcription"].startswith(
                        phonelist[i_pl]["label"])):
                # skip the phone
                i_pl += 1
# take all phones fully included in the utterance
while (i_pl < len(phonelist) and
phonelist[i_pl]["stop"] < wl[-1]["stop"]):
segment.append(phonelist[i_pl])
i_pl += 1
            # a phone which overlaps with the end of the last word might
            # belong to the utterance
            if (i_pl < len(phonelist) and
                    phonelist[i_pl]["label"] and
                    phonelist[i_pl]["start"] < wl[-1]["stop"] and
                    wl[-1]["narrow_transcription"].endswith(
                        phonelist[i_pl]["label"])):
                # add the phone
                segment.append(phonelist[i_pl])
                i_pl += 1
phone_seqs.append(segment)
return phone_seqs
def shift_timestamps(detail, shift):
for item in detail:
item["start"] += shift
item["stop"] += shift
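# For example (synthetic values), shifting a detail list to utterance-relative
# time:
#   d = [{"start": 1.5, "stop": 1.75, "label": "ow"}]
#   shift_timestamps(d, -1.5)  # d[0] becomes {"start": 0.0, "stop": 0.25, ...}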
def matching(phone, word):
# Reject phones without label
if phone["label"] is None:
return False
# Phones that are totally encompassed by the word timewise are considered
# part of the word
if phone["start"] >= word["start"] and phone["stop"] <= word["stop"]:
return True
# In case of overlap with the start or the end, we further request a time
# difference <= 20 ms and check the label
if phone["start"] < word["start"]:
# Possible overlap with the start
if (included(phone, word) and
word["narrow_transcription"].startswith(phone["label"])):
return True
# Last option is an overlap with the end of the word
return (included(phone, word) and
word["narrow_transcription"].endswith(phone["label"]))
def included(phone, word, threshold=0.02):
# We accept an overlap with time difference up to the threshold at the
# start or the end
return (phone["start"] >= word["start"] - threshold and
phone["start"] < word["stop"] and
phone["stop"] > word["start"] and
phone["stop"] <= word["stop"] + threshold)
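

if __name__ == "__main__":
    # Minimal self-check of the matching helpers on synthetic alignments
    # (hypothetical values, not corpus data): a phone fully inside a word
    # matches; a phone overlapping the word's start matches only when its
    # label opens the narrow transcription.
    word = {"start": 1.0, "stop": 1.5, "narrow_transcription": "ow k ay"}
    assert matching({"start": 1.1, "stop": 1.3, "label": "k"}, word)
    assert matching({"start": 0.99, "stop": 1.2, "label": "ow"}, word)
    assert not matching({"start": 0.99, "stop": 1.2, "label": "k"}, word)
    print("matching/included self-check passed")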