"""Babelbox Voice Dataset."""

import codecs
import csv
import os
from typing import List

import datasets
from tqdm import tqdm

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{babelboxvoice:2022,
  author = {Andersson, O. and Bjelkenhed, M. and Bielsa, M. et al},
  title = {Babelbox Voice: A Speech Corpus for training Whisper},
  year = 2022
}
"""

_HF_REPO_PATH = "https://huggingface.co/datasets/babelbox/babelbox_voice/"


class BabelboxVoiceConfig(datasets.BuilderConfig):
    """BuilderConfig for BabelboxVoice."""

    def __init__(self, name, version, **kwargs):
        self.features = kwargs.pop("features", None)
        self.data_url = kwargs.pop("data_url", None)
        self.nb_data_shards = kwargs.pop("nb_data_shards", None)
        self.metadata_url = kwargs.pop("metadata_url", None)
        # Forward the per-config description to the base class so it is not lost.
        description = kwargs.pop("description", "Babelbox Voice speech-to-text dataset.")
        super(BabelboxVoiceConfig, self).__init__(
            name=name,
            version=version,
            description=description,
            **kwargs,
        )


class BabelboxVoice(datasets.GeneratorBasedBuilder):

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        BabelboxVoiceConfig(
            name="nst",
            version=VERSION,
            description="This part of Babelbox Voice includes data from the National Library of Norway.",
            features=["path", "audio", "sentence"],
            data_url=_HF_REPO_PATH + "resolve/main/data/nst/nst-data-{:0>3d}.tar.gz",
            nb_data_shards=42,
            metadata_url=_HF_REPO_PATH + "resolve/main/data/nst/metadata.tar.gz",
        )
    ]

    DEFAULT_CONFIG_NAME = "nst"

    def _info(self):
        description = (
            "Babelbox Voice is an initiative to help teach machines how real people speak."
        )
        # All configs currently expose the same features.
        features = datasets.Features(
            {
                "path": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=16_000),
                "sentence": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=description,
            features=features,
            supervised_keys=None,
            version=self.config.version,
            citation=_CITATION,
        )

    def get_metadata(self, dl_manager, metadata_url):
        """Download the metadata archive and build a filename -> transcription mapping."""
        if metadata_url is None:
            return None
        metadata_path = dl_manager.download(metadata_url)
        metadata_archive = dl_manager.iter_archive(metadata_path)
        metadata = {}
        for path, file in metadata_archive:
            reader = csv.DictReader(codecs.iterdecode(file, "utf-8"))
            for row in tqdm(reader, desc="Reading metadata..."):
                filename = row["filename_channel_1"]
                sentence = row["text"]
                metadata[filename] = sentence
        return metadata

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        download_urls = [
            self.config.data_url.format(i) for i in range(1, self.config.nb_data_shards + 1)
        ]
        archive_paths = dl_manager.download(download_urls)
        # When not streaming, extract the archives locally so absolute file paths
        # can be attached to the examples.
        local_extracted_archive_paths = (
            dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
        )
        metadata = self.get_metadata(dl_manager, self.config.metadata_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "local_extracted_archive_paths": local_extracted_archive_paths,
                    "archives": [dl_manager.iter_archive(path) for path in archive_paths],
                    "metadata": metadata,
                },
            )
        ]

    def _generate_examples(self, local_extracted_archive_paths, archives, metadata):
        for i, audio_archive in enumerate(archives):
            for path, file in audio_archive:
                # Look up the transcription with the archive-relative path before
                # rewriting it to a local, extracted location.
                sentence = metadata[path]
                if local_extracted_archive_paths:
                    path = os.path.join(local_extracted_archive_paths[i], path)
                result = {
                    "path": path,
                    "audio": {"path": path, "bytes": file.read()},
                    "sentence": sentence,
                }
                yield path, result
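

# A minimal usage sketch, assuming this file is published as the loading script of the
# `babelbox/babelbox_voice` repository referenced in _HF_REPO_PATH above. It is only a
# smoke test run manually; the `datasets` library imports the builder class directly and
# never executes this block. Depending on the installed `datasets` version, passing
# trust_remote_code=True may also be required for script-based datasets.
if __name__ == "__main__":
    from datasets import load_dataset

    # Stream the "nst" config so the full tar.gz shards are not downloaded up front.
    nst = load_dataset("babelbox/babelbox_voice", "nst", split="train", streaming=True)
    sample = next(iter(nst))
    print(sample["path"])
    print(sample["sentence"])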