# coding=utf-8 # Copyright 2022 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Multilingual Librispeech automatic speech recognition dataset.""" from functools import partial import os import datasets from datasets.tasks import AutomaticSpeechRecognition _CITATION = """\ @article{Pratap2020MLSAL, title={MLS: A Large-Scale Multilingual Dataset for Speech Research}, author={Vineel Pratap and Qiantong Xu and Anuroop Sriram and Gabriel Synnaeve and Ronan Collobert}, journal={ArXiv}, year={2020}, volume={abs/2012.03411} } """ _DESCRIPTION = """\ This is a streamable version of the Multilingual LibriSpeech (MLS) dataset. The data archives were restructured from the original ones from [OpenSLR](http://www.openslr.org/94) to make it easier to stream. MLS dataset is a large multilingual corpus suitable for speech research. The dataset is derived from read audiobooks from LibriVox and consists of 8 languages: English, German, Dutch, Spanish, French, Italian, Portuguese, Polish. """ _URL = "http://www.openslr.org/94" _DL_URL_FORMAT = "data/mls_{name}" class MultilingualLibrispeechConfig(datasets.BuilderConfig): """BuilderConfig for MultilingualLibrispeech.""" def __init__(self, name, **kwargs): """ Args: name: `string`, name of dataset config (=language) **kwargs: keyword arguments forwarded to super. """ super(MultilingualLibrispeechConfig, self).__init__( version=datasets.Version("2.1.0", ""), name=name, **kwargs ) # relative path to full data inside a repo (for example `data/mls_german`) self.data_root_dir = _DL_URL_FORMAT.format(name=name) class MultilingualLibrispeech(datasets.GeneratorBasedBuilder): """Multilingual Librispeech dataset.""" BUILDER_CONFIGS = [ MultilingualLibrispeechConfig(name="german", description="German LibriSpeech dataset"), MultilingualLibrispeechConfig(name="dutch", description="Dutch LibriSpeech dataset"), MultilingualLibrispeechConfig(name="french", description="French LibriSpeech dataset"), MultilingualLibrispeechConfig(name="spanish", description="Spanish LibriSpeech dataset"), MultilingualLibrispeechConfig(name="italian", description="Italian LibriSpeech dataset"), MultilingualLibrispeechConfig(name="portuguese", description="Portuguese LibriSpeech dataset"), MultilingualLibrispeechConfig(name="polish", description="Polish LibriSpeech dataset"), ] def _info(self): return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features( { "file": datasets.Value("string"), "audio": datasets.features.Audio(sampling_rate=16_000), "text": datasets.Value("string"), "speaker_id": datasets.Value("int64"), "chapter_id": datasets.Value("int64"), "id": datasets.Value("string"), } ), supervised_keys=("file", "text"), homepage=_URL, citation=_CITATION, task_templates=[AutomaticSpeechRecognition(audio_file_path_column="file", transcription_column="text")], ) def _split_generators(self, dl_manager): download_transcript = partial( download_extract_transcript, dl_manager=dl_manager, root_dir=self.config.data_root_dir ) download_audio = partial( download_audio_archives, dl_manager=dl_manager, root_dir=self.config.data_root_dir ) download_limited_ids = partial( download_extract_limited_ids, dl_manager=dl_manager, root_dir=self.config.data_root_dir ) train_kwargs = { "transcript_path": download_transcript(split="train"), "audio_archives": download_audio(split="train") } train_splits = [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs=train_kwargs ), datasets.SplitGenerator( name="train.9h", gen_kwargs={ **train_kwargs, "limited_ids_paths": download_limited_ids(sub_folder="limited_supervision/9hr"), }, ), datasets.SplitGenerator( name="train.1h", gen_kwargs={ **train_kwargs, "limited_ids_paths": download_limited_ids(sub_folder="limited_supervision/1hr"), }, ), ] return train_splits + [ datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={ "transcript_path": download_transcript(split="dev"), "audio_archives": download_audio(split="dev"), } ), datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={ "transcript_path": download_transcript(split="test"), "audio_archives": download_audio(split="test"), } ), ] def _generate_examples(self, transcript_path, audio_archives, limited_ids_paths=None): """Generate examples from a Multilingual LibriSpeech data dir.""" transcripts = dict() with open(transcript_path, "r", encoding="utf-8") as file: for line in file: audio_id, transcript = line.split("\t") transcripts[audio_id] = transcript limited_ids, limited_ids_archives_names = [], [] if limited_ids_paths: for path in limited_ids_paths: with open(path, "r", encoding="utf-8") as file: limited_ids.extend([line.strip() for line in file.readlines()]) limited_ids = set(limited_ids) for audio_archive in audio_archives: # TODO: check that archive doesn't contain needed ids # if limited_ids and audio_archive not in limited_ids_archives_names: # continue for audio_filename, file in audio_archive: speaker_id, chapter_id = audio_filename.split("_")[:2] speaker_id, chapter_id = int(speaker_id), int(chapter_id) audio_id = audio_filename.split(".flac")[0] audio_transcript = transcripts[audio_id] if limited_ids and audio_id not in limited_ids: # this only can be true in limited supervision sets ("train.9h" and "train.1h") continue yield audio_filename, { "file": audio_filename, "audio": {"path": audio_filename, "bytes": file.read()}, "text": audio_transcript, "speaker_id": speaker_id, "chapter_id": chapter_id, "id": audio_id } def download_extract_limited_ids(dl_manager, root_dir, sub_folder): """Download and extract all handles.txt files containing ids for limited supervision train sets. """ sub_path = os.path.join(root_dir, "train", sub_folder) if sub_folder.endswith("9hr"): limited_ids_paths = [os.path.join(sub_path, "handles.txt")] else: # => sub_folder.endswith("1hr") # in case of 1 hour limited supervision ("train.1h") there are always 6 subfolders like: # "limited_supervision/1h/0/handles.txt", "limited_supervision/1h/1/handles.txt", ... limited_ids_paths = [os.path.join(sub_path, str(i), "handles.txt") for i in range(6)] limited_ids_paths = dl_manager.download_and_extract(limited_ids_paths) return limited_ids_paths def download_extract_transcript(dl_manager, root_dir, split): """Downloading and extracting file with audio transcriptions. """ transcript_path = os.path.join(root_dir, split, "transcripts.txt") return dl_manager.download_and_extract(transcript_path) def download_audio_archives(dl_manager, root_dir, split): """Prepare archives with audio files for iterating over them. Return: audio_archives (List `Generator`): list of generators to iterate over files in each audio archive. """ # each split contains many .tar.gz archives with its audio files # audio_filenames.txt contains the names of these archives split_dir = os.path.join(root_dir, split) audio_filenames_path = dl_manager.download_and_extract(os.path.join(split_dir, "audio_filenames.txt")) with open(audio_filenames_path, "r", encoding="utf-8") as file: audio_filenames = [line.strip() for line in file.readlines()] archive_paths = dl_manager.download([os.path.join(split_dir, "audio", filename) for filename in audio_filenames]) audio_archives = [dl_manager.iter_archive(archive_path) for archive_path in archive_paths] return audio_archives