Datasets:
Tasks:
Automatic Speech Recognition
Multilinguality:
multilingual
Size Categories:
100K<n<1M
Annotations Creators:
expert-generated
Source Datasets:
original
ArXiv:
2012.03411
Tags:
License:
# coding=utf-8 | |
# Copyright 2022 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors. | |
# | |
# Licensed under the Apache License, Version 2.0 (the "License"); | |
# you may not use this file except in compliance with the License. | |
# You may obtain a copy of the License at | |
# | |
# http://www.apache.org/licenses/LICENSE-2.0 | |
# | |
# Unless required by applicable law or agreed to in writing, software | |
# distributed under the License is distributed on an "AS IS" BASIS, | |
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
# See the License for the specific language governing permissions and | |
# limitations under the License. | |
# Lint as: python3 | |
"""Multilingual Librispeech automatic speech recognition dataset.""" | |
from functools import partial | |
import os | |
import datasets | |
from datasets.streaming import xopen | |
# BibTeX entry for the MLS paper (Pratap et al., 2020).
_CITATION = """\
@article{Pratap2020MLSAL,
  title={MLS: A Large-Scale Multilingual Dataset for Speech Research},
  author={Vineel Pratap and Qiantong Xu and Anuroop Sriram and Gabriel Synnaeve and Ronan Collobert},
  journal={ArXiv},
  year={2020},
  volume={abs/2012.03411}
}
"""

_DESCRIPTION = """\
This is a streamable version of the Multilingual LibriSpeech (MLS) dataset.
The data archives were restructured from the original ones from [OpenSLR](http://www.openslr.org/94)
to make it easier to stream.

MLS dataset is a large multilingual corpus suitable for speech research.
The dataset is derived from read audiobooks from LibriVox and consists of 8 languages:
English, German, Dutch, Spanish, French, Italian, Portuguese, Polish.
"""

# OpenSLR page of the original MLS release; used as the dataset homepage.
_URL = "http://www.openslr.org/94"

# Template for the repo-relative directory holding one language's data,
# e.g. `data/mls_german`.
_DL_URL_FORMAT = "data/mls_{name}"
class MultilingualLibrispeechConfig(datasets.BuilderConfig):
    """BuilderConfig for one language of Multilingual LibriSpeech."""

    def __init__(self, name, **kwargs):
        """Create a config for a single language.

        Args:
            name: `string`, name of the dataset config (= language, e.g. "german").
            **kwargs: keyword arguments forwarded to `datasets.BuilderConfig`.
        """
        config_version = datasets.Version("2.1.0", "")
        super().__init__(version=config_version, name=name, **kwargs)
        # Repo-relative directory with the full data for this language
        # (for example `data/mls_german`).
        self.data_root_dir = _DL_URL_FORMAT.format(name=name)
class MultilingualLibrispeech(datasets.GeneratorBasedBuilder):
    """Multilingual Librispeech dataset.

    One builder config per language. Each split is described by a transcript
    file plus a list of .tar.gz audio archives (see the module-level download
    helpers below), which supports both streaming and non-streaming loading.
    """

    # NOTE(review): only 7 languages are listed although _DESCRIPTION mentions 8
    # (English is absent) — presumably intentional; confirm against the repo.
    BUILDER_CONFIGS = [
        MultilingualLibrispeechConfig(name="german", description="German LibriSpeech dataset"),
        MultilingualLibrispeechConfig(name="dutch", description="Dutch LibriSpeech dataset"),
        MultilingualLibrispeechConfig(name="french", description="French LibriSpeech dataset"),
        MultilingualLibrispeechConfig(name="spanish", description="Spanish LibriSpeech dataset"),
        MultilingualLibrispeechConfig(name="italian", description="Italian LibriSpeech dataset"),
        MultilingualLibrispeechConfig(name="portuguese", description="Portuguese LibriSpeech dataset"),
        MultilingualLibrispeechConfig(name="polish", description="Polish LibriSpeech dataset"),
    ]

    def _info(self):
        """Declare the feature schema, supervised keys, homepage and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                    "speaker_id": datasets.Value("int64"),
                    "chapter_id": datasets.Value("int64"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=("file", "text"),
            homepage=_URL,
            citation=_CITATION,
            task_templates=None,
        )

    def _split_generators(self, dl_manager):
        """Build train/dev/test split generators plus the limited-supervision
        train subsets ("train.9h" and "train.1h")."""
        # Every download helper shares the same dl_manager and the
        # language-specific data root (e.g. `data/mls_german`).
        download_kwargs = {
            "dl_manager": dl_manager,
            "root_dir": self.config.data_root_dir
        }
        download_transcript = partial(
            download_extract_transcript, **download_kwargs
        )
        # Locally extracted archives — only prepared when not streaming.
        download_audio_non_streaming = partial(
            download_extract_audio_archives, **download_kwargs
        )
        # Archive iterators — usable in both streaming and non-streaming mode.
        download_audio_streaming = partial(
            download_audio_archives, **download_kwargs
        )
        download_limited_ids = partial(
            download_extract_limited_ids, **download_kwargs
        )
        train_kwargs = {
            "transcript_path": download_transcript(split="train"),
            "audio_archives": download_audio_streaming(split="train"),
            # In streaming mode there are no extracted local copies, so
            # _generate_examples falls back to archive-relative paths.
            "local_audio_archives_paths": download_audio_non_streaming(split="train")
            if not dl_manager.is_streaming else None
        }
        train_splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs=train_kwargs
            ),
            # The limited-supervision subsets reuse the full train data and
            # filter examples by the ids listed in their handles.txt files.
            datasets.SplitGenerator(
                name="train.9h",
                gen_kwargs={
                    **train_kwargs,
                    "limited_ids_paths": download_limited_ids(sub_folder="limited_supervision/9hr"),
                },
            ),
            datasets.SplitGenerator(
                name="train.1h",
                gen_kwargs={
                    **train_kwargs,
                    "limited_ids_paths": download_limited_ids(sub_folder="limited_supervision/1hr"),
                },
            ),
        ]
        return train_splits + [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={
                    "transcript_path": download_transcript(split="dev"),
                    "audio_archives": download_audio_streaming(split="dev"),
                    "local_audio_archives_paths": download_audio_non_streaming(split="dev")
                    if not dl_manager.is_streaming else None
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={
                    "transcript_path": download_transcript(split="test"),
                    "audio_archives": download_audio_streaming(split="test"),
                    "local_audio_archives_paths": download_audio_non_streaming(split="test")
                    if not dl_manager.is_streaming else None
                }
            ),
        ]

    def _generate_examples(self, transcript_path, audio_archives, local_audio_archives_paths, limited_ids_paths=None):
        """Generate examples from a Multilingual LibriSpeech data dir.

        Args:
            transcript_path: path to the split's tab-separated transcripts file
                (one "<audio_id>\\t<transcript>" line per utterance).
            audio_archives: iterables of (filename, file-object) pairs, one per
                audio archive of the split.
            local_audio_archives_paths: extraction directories matching
                `audio_archives` one-to-one, or None in streaming mode.
            limited_ids_paths: optional paths to handles.txt files; when given,
                only the listed audio ids are yielded ("train.9h"/"train.1h").
        """
        transcripts = dict()
        with open(transcript_path, "r", encoding="utf-8") as file:
            for line in file:
                audio_id, transcript = line.strip().split("\t")
                transcripts[audio_id] = transcript

        # Whitelist of example ids for the limited-supervision subsets.
        limited_ids, limited_ids_archives_names = [], []
        if limited_ids_paths:
            for path in limited_ids_paths:
                with open(path, "r", encoding="utf-8") as file:
                    limited_ids.extend([line.strip() for line in file.readlines()])
            limited_ids = set(limited_ids)

        for archive_idx, audio_archive in enumerate(audio_archives):
            # TODO: check that archive doesn't contain needed ids
            # if limited_ids and audio_archive not in limited_ids_archives_names:
            #     continue
            for audio_filename, file in audio_archive:
                # File names look like "<speaker>_<chapter>_<utterance>.flac".
                speaker_id, chapter_id = audio_filename.split("_")[:2]
                speaker_id, chapter_id = int(speaker_id), int(chapter_id)
                audio_id = audio_filename.split(".flac")[0]
                audio_transcript = transcripts[audio_id]
                if limited_ids and audio_id not in limited_ids:
                    # this only can be true in limited supervision sets ("train.9h" and "train.1h")
                    continue
                # Prefer the extracted local path when available so "file"
                # points to a real file; in streaming mode only bytes exist.
                path = os.path.join(local_audio_archives_paths[archive_idx], audio_filename)\
                    if local_audio_archives_paths else audio_filename
                yield audio_filename, {
                    "file": path if local_audio_archives_paths else None,
                    "audio": {"path": path, "bytes": file.read()},
                    "text": audio_transcript,
                    "speaker_id": speaker_id,
                    "chapter_id": chapter_id,
                    "id": audio_id
                }
def download_extract_limited_ids(dl_manager, root_dir, sub_folder):
    """Download the handles.txt files listing ids of a limited-supervision train set.

    Returns the downloaded paths of every handles.txt belonging to `sub_folder`.
    """
    base = os.path.join(root_dir, "train", sub_folder)
    if sub_folder.endswith("9hr"):
        # The 9-hour set keeps a single handles.txt directly in its folder.
        handle_files = [os.path.join(base, "handles.txt")]
    else:  # => sub_folder.endswith("1hr")
        # The 1-hour set ("train.1h") always spreads its ids over six numbered
        # subfolders: "limited_supervision/1hr/0/handles.txt", ".../1/handles.txt", ...
        handle_files = [
            os.path.join(base, str(part), "handles.txt") for part in range(6)
        ]
    return dl_manager.download(handle_files)
def download_extract_transcript(dl_manager, root_dir, split):
    """Fetch the transcriptions file of one split.

    Return:
        path (str): path to the locally available `transcripts.txt` file
    """
    return dl_manager.download(os.path.join(root_dir, split, "transcripts.txt"))
def download_audio_archive_paths(dl_manager, root_dir, split):
    """Resolve and download the audio archives of one split.

    Each split ships many .tar.gz archives with its audio files; the split's
    `audio_filenames.txt` lists their names. Download that listing first, then
    request every archive it mentions.
    """
    split_dir = os.path.join(root_dir, split)
    listing_path = dl_manager.download(os.path.join(split_dir, "audio_filenames.txt"))
    archive_names = []
    with open(listing_path, "r", encoding="utf-8") as listing_file:
        for line in listing_file:
            archive_names.append(line.strip())
    archive_urls = [os.path.join(split_dir, "audio", name) for name in archive_names]
    return dl_manager.download(archive_urls)
# for non-streaming case
def download_extract_audio_archives(dl_manager, root_dir, split):
    """
    Download and extract the split's audio archives locally.

    Return:
        archive_paths (List `str`): paths to locally extracted archives
    """
    extracted_paths = []
    for archive_path in download_audio_archive_paths(dl_manager, root_dir, split):
        extracted_paths.append(dl_manager.extract(archive_path))
    return extracted_paths
# for streaming case
def download_audio_archives(dl_manager, root_dir, split):
    """Prepare the split's audio archives for iteration (streaming-friendly).

    Return:
        audio_archives (List `Generator`): one generator per audio archive,
            yielding the files it contains.
    """
    archive_paths = download_audio_archive_paths(dl_manager, root_dir, split)
    iterators = []
    for archive_path in archive_paths:
        iterators.append(dl_manager.iter_archive(archive_path))
    return iterators