"""TED-LIUM speech recognition dataset.""" |
|
import csv |
|
import os |
|
import re |
|
from collections import defaultdict |
|
from io import BytesIO |
|
from pathlib import Path |
|
|
|
import numpy as np |
|
import soundfile as sf |
|
|
|
import datasets |
|
from datasets.tasks import AutomaticSpeechRecognition |
|
|
|
|
|


_DL_URL = "https://huggingface.co/datasets/LIUM/tedlium/resolve/main/"

_LICENSE = "licensed under Creative Commons BY-NC-ND 3.0 (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en)"

_WHISPER_TRANSCRIPT_URL = "https://huggingface.co/datasets/distil-whisper/whisper_transcriptions_greedy/resolve/main/tedlium"
_WHISPER_TRANSCRIPT_URLs = _WHISPER_TRANSCRIPT_URL + "/{split}-transcription.csv"
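
# Illustrative usage, assuming this file is saved locally as `tedlium.py` and
# loaded as a `datasets` loading script (only the `release3` config is accepted
# by `_split_generators` below; streaming avoids extracting the large archives):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("tedlium.py", "release3", split="validation", streaming=True)
#     sample = next(iter(ds))
#     print(sample["text"], sample["whisper_transcript"])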


class TedliumReleaseConfig(datasets.BuilderConfig):
    """BuilderConfig for a release of the TED-LIUM dataset."""

    def __init__(self, *, url, download_urls, split_paths, citation, **kwargs):
        super(TedliumReleaseConfig, self).__init__(version=datasets.Version("1.0.1"), **kwargs)
        self.url = url
        self.download_urls = download_urls
        self.split_paths = split_paths
        self.citation = citation


def _make_builder_configs():
    """Creates builder configs for all supported Tedlium dataset releases."""
    release1 = TedliumReleaseConfig(
        name="release1",
        description="""\
        The TED-LIUM corpus is English-language TED talks, with transcriptions,
        sampled at 16kHz. It contains about 118 hours of speech.

        This is the TED-LIUM corpus release 1,
        licensed under Creative Commons BY-NC-ND 3.0
        (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en).
        """,
        citation="""\
        @inproceedings{rousseau2012tedlium,
          title={TED-LIUM: an Automatic Speech Recognition dedicated corpus},
          author={Rousseau, Anthony and Del{\\'e}glise, Paul and Est{\\`e}ve, Yannick},
          booktitle={Conference on Language Resources and Evaluation (LREC)},
          pages={125--129},
          year={2012}
        }
        """,
        url="https://www.openslr.org/7/",
        # NB: the archive URLs are built with os.path.join, which assumes a
        # POSIX path separator ("/").
        download_urls={
            "train": [_DL_URL + os.path.join("TEDLIUM_release1", "train.tar.gz")],
            "validation": [_DL_URL + os.path.join("TEDLIUM_release1", "dev.tar.gz")],
            "test": [_DL_URL + os.path.join("TEDLIUM_release1", "test.tar.gz")],
        },
        split_paths=[
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "dev"),
            (datasets.Split.TEST, "test"),
        ],
    )

    release2 = TedliumReleaseConfig(
        name="release2",
        description="""\
        This is the TED-LIUM corpus release 2,
        licensed under Creative Commons BY-NC-ND 3.0
        (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en).

        All talks and text are property of TED Conferences LLC.

        The TED-LIUM corpus was made from audio talks and their transcriptions
        available on the TED website. We have prepared and filtered these data
        in order to train acoustic models to participate to the International
        Workshop on Spoken Language Translation 2011 (the LIUM English/French
        SLT system reached the first rank in the SLT task).

        Contains 1495 talks and transcripts.
        """,
        citation="""\
        @inproceedings{rousseau2014tedlium2,
          title={Enhancing the {TED-LIUM} Corpus with Selected Data for Language Modeling and More {TED} Talks},
          author={Rousseau, Anthony and Del{\\'e}glise, Paul and Est{\\`e}ve, Yannick},
          booktitle={Conference on Language Resources and Evaluation (LREC)},
          year={2014}
        }
        """,
        url="https://www.openslr.org/19/",
        download_urls={
            "train": [
                _DL_URL + os.path.join("TEDLIUM_release2", "train_1.tar.gz"),
                _DL_URL + os.path.join("TEDLIUM_release2", "train_2.tar.gz"),
            ],
            "validation": [_DL_URL + os.path.join("TEDLIUM_release2", "dev.tar.gz")],
            "test": [_DL_URL + os.path.join("TEDLIUM_release2", "test.tar.gz")],
        },
        split_paths=[
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "dev"),
            (datasets.Split.TEST, "test"),
        ],
    )

    release3 = TedliumReleaseConfig(
        name="release3",
        description="""\
        This is the TED-LIUM corpus release 3, licensed under Creative Commons
        BY-NC-ND 3.0. This is the 'legacy' version of the corpus, in which the
        dev and test datasets are the same as in TED-LIUM 2 (and TED-LIUM 1).

        All talks and text are property of TED Conferences LLC.

        This new TED-LIUM release was made through a collaboration between the
        Ubiqus company and the LIUM (University of Le Mans, France).

        Contents:

        - 2351 audio talks in NIST sphere format (SPH), including talks from
          TED-LIUM 2: be careful, same talks but not same audio files (only
          these audio files must be used with the TED-LIUM 3 STM files)
        - 452 hours of audio
        - 2351 aligned automatic transcripts in STM format
        - TED-LIUM 2 dev and test data: 19 TED talks in SPH format with
          corresponding manual transcriptions.
        - Dictionary with pronunciations (159848 entries), same file as the one
          included in TED-LIUM 2
        - Selected monolingual data for language modeling from WMT12 publicly
          available corpora: these files come from the TED-LIUM 2 release, but
          have been modified to get a tokenization more relevant for English
          language
        """,
        citation="""\
        @inproceedings{hernandez2018tedlium3,
          title={TED-LIUM 3: twice as much data and corpus repartition for experiments on speaker adaptation},
          author={Hernandez, Fran{\\c{c}}ois and Nguyen, Vincent and Ghannay, Sahar and Tomashenko, Natalia and Est{\\`e}ve, Yannick},
          booktitle={International Conference on Speech and Computer},
          pages={198--208},
          year={2018},
          organization={Springer}
        }
        """,
        url="https://www.openslr.org/51/",
        download_urls={
            "train": [
                _DL_URL + os.path.join("TEDLIUM_release3", "legacy", "train_1.tar.gz"),
                _DL_URL + os.path.join("TEDLIUM_release3", "legacy", "train_2.tar.gz"),
            ],
            "validation": [_DL_URL + os.path.join("TEDLIUM_release3", "legacy", "dev.tar.gz")],
            "test": [_DL_URL + os.path.join("TEDLIUM_release3", "legacy", "test.tar.gz")],
        },
        split_paths=[
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "dev"),
            (datasets.Split.TEST, "test"),
        ],
    )

    release3_speaker_adaptation = TedliumReleaseConfig(
        name="release3-speaker-adaptation",
        description="""\
        This is the TED-LIUM corpus release 3, licensed under Creative Commons
        BY-NC-ND 3.0. This is the 'speaker adaptation' version of the corpus,
        specially designed for experiments on speaker adaptation.

        All talks and text are property of TED Conferences LLC.

        This new TED-LIUM release was made through a collaboration between the
        Ubiqus company and the LIUM (University of Le Mans, France).
        """,
        citation="""\
        @inproceedings{hernandez2018tedlium3,
          title={TED-LIUM 3: twice as much data and corpus repartition for experiments on speaker adaptation},
          author={Hernandez, Fran{\\c{c}}ois and Nguyen, Vincent and Ghannay, Sahar and Tomashenko, Natalia and Est{\\`e}ve, Yannick},
          booktitle={International Conference on Speech and Computer},
          pages={198--208},
          year={2018},
          organization={Springer}
        }
        """,
        url="https://www.openslr.org/51/",
        download_urls={
            "train": [
                _DL_URL + os.path.join("TEDLIUM_release3", "speaker-adaptation", "train_1.tar.gz"),
                _DL_URL + os.path.join("TEDLIUM_release3", "speaker-adaptation", "train_2.tar.gz"),
            ],
            "validation": [_DL_URL + os.path.join("TEDLIUM_release3", "speaker-adaptation", "dev.tar.gz")],
            "test": [_DL_URL + os.path.join("TEDLIUM_release3", "speaker-adaptation", "test.tar.gz")],
        },
        split_paths=[
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "dev"),
            (datasets.Split.TEST, "test"),
        ],
    )

    return [release1, release2, release3, release3_speaker_adaptation]
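
# The available config names are therefore "release1", "release2", "release3"
# and "release3-speaker-adaptation". Note that `_split_generators` below only
# accepts "release3": the Whisper transcription CSVs pointed to by
# `_WHISPER_TRANSCRIPT_URL` appear to be provided for that release only.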


class TedLium(datasets.GeneratorBasedBuilder):
    """The TED-LIUM corpus is English-language TED talks, with transcriptions, sampled at 16kHz. It contains about 118 hours of speech."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = _make_builder_configs()

    def _info(self):
        features = datasets.Features(
            {
                "audio": datasets.features.Audio(sampling_rate=16_000),
                "text": datasets.Value("string"),
                "speaker_id": datasets.Value("string"),
                "gender": datasets.features.ClassLabel(names=["unknown", "female", "male"]),
                "file": datasets.Value("string"),
                "id": datasets.Value("string"),
                "whisper_transcript": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=self.config.description,
            features=features,
            supervised_keys=("audio", "text"),
            homepage=self.config.url,
            license=_LICENSE,
            citation=self.config.citation,
            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
        )
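
    # For orientation, a yielded example looks roughly like this (all values are
    # illustrative, not taken from the corpus):
    #
    #     {
    #         "audio": {"path": ".../JaneDoe_2010.sph", "array": <np.ndarray>, "sampling_rate": 16000},
    #         "text": "this is a ted talk",
    #         "speaker_id": "JaneDoe_2010",
    #         "gender": "female",
    #         "file": ".../JaneDoe_2010.sph",
    #         "id": "JaneDoe_2010-0.00-4.20-<o,f0,female>",
    #         "whisper_transcript": "This is a TED talk.",
    #     }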

    def _split_generators(self, dl_manager):
        if self.config.name != "release3":
            raise ValueError("This dataset is only compatible with the `release3` config.")

        archive_path = dl_manager.download(self.config.download_urls)
        # In non-streaming mode, extract the archives once so the audio files can be read from disk.
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}

        transcription_urls = {split: _WHISPER_TRANSCRIPT_URLs.format(split=split) for split in ["train", "validation", "test"]}
        transcript_archive_path = dl_manager.download(transcription_urls)

        splits = []
        for split, path in self.config.split_paths:
            kwargs = {
                "filepath": [dl_manager.iter_archive(sharded_path) for sharded_path in archive_path[split]],
                "local_extracted_archive": local_extracted_archive.get(split),
                "split_path": path,
                # `split` is already named "train"/"validation"/"test"; only the on-disk
                # `path` uses "dev", so the transcription CSVs can be keyed by `split` directly.
                "whisper_transcript": transcript_archive_path[split],
            }
            splits.append(datasets.SplitGenerator(name=split, gen_kwargs=kwargs))
        return splits
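
    # `_generate_examples` has two code paths: a non-streaming path that walks the
    # locally extracted archives, and a streaming path that reads .sph/.stm members
    # directly out of the tar archives yielded by `dl_manager.iter_archive`.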

    def _generate_examples(self, filepath, local_extracted_archive, split_path, whisper_transcript):
        """Generate examples from a TED-LIUM stm file."""
        # Load the pre-computed Whisper transcriptions, keyed by segment id.
        whisper_transcriptions = {}
        with open(whisper_transcript, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter=",")
            for line in reader:
                whisper_transcriptions[line["file_id"]] = line["whisper_transcript"]

        if local_extracted_archive:
            for local_archive in local_extracted_archive:
                split_dir = os.path.join(local_archive, split_path)
                stm_files = [os.path.join(split_dir, f) for f in os.listdir(split_dir) if f.endswith(".stm")]
                for file in stm_files:
                    # The audio for each stm file lives in a .sph file with the same stem.
                    speaker_file = Path(file).stem
                    audio_file = os.path.join(split_dir, speaker_file + ".sph")
                    segment, sampling_rate = sf.read(audio_file, dtype=np.int16)
                    with open(file, encoding="utf-8") as f:
                        for line in f:
                            line = line.strip()
                            fn, channel, speaker, start, end, label, transcript = line.split(" ", 6)
                            transcript = _maybe_trim_suffix(transcript)
                            if speaker_file != fn:
                                # The stm line references a different sph file: reload the audio.
                                speaker_file = fn
                                audio_file = os.path.join(split_dir, speaker_file + ".sph")
                                segment, sampling_rate = sf.read(audio_file, dtype=np.int16)
                            samples = _extract_audio_segment(segment, sampling_rate, float(start), float(end))
                            key = "-".join([speaker, start, end, label])
                            example = {
                                "audio": {"path": audio_file, "array": samples, "sampling_rate": sampling_rate},
                                "text": transcript,
                                "speaker_id": speaker,
                                "gender": _parse_gender(label),
                                "file": audio_file,
                                "id": key,
                                "whisper_transcript": whisper_transcriptions.get(key, None),
                            }
                            yield key, example
        else:
            audio_data = {}
            transcripts = defaultdict(list)
            for file in filepath:
                for path, f in file:
                    if path.endswith(".sph"):
                        # Get the file id and read the audio data from its raw byte form.
                        fn = os.path.splitext(os.path.basename(path))[0]
                        audio_data[fn] = sf.read(BytesIO(f.read()), dtype=np.int16)
                    elif path.endswith(".stm"):
                        for line in f:
                            if line:
                                line = line.decode("utf-8").strip()
                                fn, channel, speaker, start, end, label, transcript = line.split(" ", 6)
                                transcript = _maybe_trim_suffix(transcript)
                                audio_file = path.replace("stm", "sph")
                                key = "-".join([speaker, start, end, label])
                                # Buffer the segment metadata until the matching audio has been read.
                                transcripts[fn].append(
                                    {
                                        "text": transcript,
                                        "speaker_id": speaker,
                                        "gender": _parse_gender(label),
                                        "file": audio_file,
                                        "id": key,
                                        "start": start,
                                        "end": end,
                                        "channel": channel,
                                        "fn": fn,
                                    }
                                )

                    if audio_data and audio_data.keys() == transcripts.keys():
                        for fn, speaker in transcripts.items():
                            for transcript in speaker:
                                segment, sampling_rate = audio_data[transcript["fn"]]
                                samples = _extract_audio_segment(
                                    segment,
                                    sampling_rate,
                                    float(transcript["start"]),
                                    float(transcript["end"]),
                                )
                                audio = {"path": transcript["file"], "array": samples, "sampling_rate": sampling_rate}
                                key = transcript["id"]
                                transcript_text = transcript["text"]
                                # Segments marked "ignore_time_segment_in_scoring" keep that marker
                                # rather than a Whisper transcription.
                                whisper_transcription = (
                                    whisper_transcriptions.get(key, None)
                                    if transcript_text != "ignore_time_segment_in_scoring"
                                    else "ignore_time_segment_in_scoring"
                                )
                                yield key, {
                                    "audio": audio,
                                    "text": transcript_text,
                                    "speaker_id": transcript["speaker_id"],
                                    "gender": transcript["gender"],
                                    "file": transcript["file"],
                                    "id": transcript["id"],
                                    "whisper_transcript": whisper_transcription,
                                }
                        # Reset the buffers once every buffered segment has been yielded.
                        audio_data = {}
                        transcripts = defaultdict(list)


def _maybe_trim_suffix(transcript):
    # stm files for the TED-LIUM release 1 train split contain a key, enclosed
    # in parens, at the end of each transcript line; drop it if present.
    splits = transcript.rsplit(" ", 1)
    transcript = splits[0]
    if len(splits) > 1:
        suffix = splits[-1]
        if not suffix.startswith("("):
            transcript += " " + suffix
    return transcript
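
# For example (inputs illustrative): _maybe_trim_suffix("but it was a dream (2)")
# returns "but it was a dream", while an input with no parenthesised suffix is
# returned unchanged.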


def _extract_audio_segment(segment, sampling_rate, start_sec, end_sec):
    """Extracts segment of audio samples (as an ndarray) from the given segment."""
    start_sample = int(start_sec * sampling_rate)
    end_sample = min(int(end_sec * sampling_rate), segment.shape[0])
    samples = segment[start_sample:end_sample]
    return samples
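
# For example, with sampling_rate=16000, start_sec=1.0 and end_sec=2.5 this
# slices segment[16000:40000], i.e. 1.5 seconds (24000 samples) of audio.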


def _parse_gender(label_str):
    """Parse gender string from STM "<label>" field."""
    gender = re.split(",|_", label_str)[-1][:-1]
    # -1 is the standard "missing" value for a ClassLabel feature.
    if not gender:
        gender = -1
    elif gender == "<NA":  # the split above leaves "<NA" for an "<NA>" label
        gender = -1
    elif gender == "F":
        gender = "female"
    elif gender == "M":
        gender = "male"
    return gender
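
# For example (labels illustrative of the STM format): _parse_gender("<o,f0,female>")
# returns "female", _parse_gender("<o,f0,male>") returns "male", and
# _parse_gender("<NA>") returns -1, the missing-label sentinel.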