""" NENA Speech Dataset""" import csv import os import json import datasets from datasets.utils.py_utils import size_str from tqdm import tqdm # _CITATION = """\ # """ # _HOMEPAGE = "https://commonvoice.mozilla.org/en/datasets" # _LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/" # TODO: change this _BASE_URL = "./" _AUDIO_URL = _BASE_URL + "audio/{dialect}/{split}.tar" _TRANSCRIPT_URL = _BASE_URL + "transcript/{dialect}/{split}.tsv" import datasets class NENASpeechConfig(datasets.BuilderConfig): """BuilderConfig for NENASpeech.""" def __init__(self, name, version, **kwargs): self.language = kwargs.pop("language", None) description = ( f"This is a test. " ) super(NENASpeechConfig, self).__init__( name=name, version=datasets.Version(version), description=description, **kwargs, ) class NENASpeech(datasets.GeneratorBasedBuilder): DEFAULT_WRITER_BATCH_SIZE = 1000 BUILDER_CONFIGS = [ NENASpeechConfig( name='curmi', version='1.0.4', language='assyrian', ), NENASpeechConfig( name='jurmi', version='1.0.4', language='assyrian', ) # for lang, lang_stats in STATS["locales"].items() ] def _info(self): # total_languages = len(STATS["locales"]) # total_valid_hours = STATS["totalValidHrs"] description = ( "description from _info" # "Common Voice is Mozilla's initiative to help teach machines how real people speak. " # f"The dataset currently consists of {total_valid_hours} validated hours of speech " # f" in {total_languages} languages, but more voices and languages are always added." ) features = datasets.Features( { "transcription": datasets.Value("string"), "translation": datasets.Value("string"), "audio": datasets.features.Audio(sampling_rate=48_000), "path": datasets.Value("string"), "age": datasets.Value("string"), } ) return datasets.DatasetInfo( description=description, # citation=_CITATION, # homepage=_HOMEPAGE, # license=_LICENSE, features=features, supervised_keys=None, version=self.config.version, ) def _split_generators(self, dl_manager): dialect = self.config.name audio_urls = {} splits = ("train", "dev", "test", "other", "invalidated") for split in splits: audio_urls[split] = _AUDIO_URL.format(dialect=dialect, split=split) archive_paths = dl_manager.download(audio_urls) local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {} meta_urls = {split: _TRANSCRIPT_URL.format(dialect=dialect, split=split) for split in splits} meta_paths = dl_manager.download_and_extract(meta_urls) split_generators = [] split_names = { "train": datasets.Split.TRAIN, "dev": datasets.Split.VALIDATION, "test": datasets.Split.TEST, } for split in splits: split_generators.append( datasets.SplitGenerator( name=split_names.get(split, split), gen_kwargs={ "local_extracted_archive_paths": local_extracted_archive_paths.get(split), "archives": [dl_manager.iter_archive(path) for path in archive_paths.get(split)], "meta_path": meta_paths[split], }, ), ) return split_generators def _generate_examples(self, local_extracted_archive_paths, archives, meta_path): data_fields = list(self._info().features.keys()) metadata = {} with open(meta_path, encoding="utf-8") as f: reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE) for row in tqdm(reader, desc="Reading metadata..."): if not row["path"].endswith(".mp3"): row["path"] += ".mp3" # accent -> accents in CV 8.0 if "accents" in row: row["accent"] = row["accents"] del row["accents"] # if data is incomplete, fill with empty values for field in data_fields: if field not in row: row[field] = "" metadata[row["path"]] 
= row for i, audio_archive in enumerate(archives): for path, file in audio_archive: _, filename = os.path.split(path) if filename in metadata: result = dict(metadata[filename]) # set the audio feature and the path to the extracted file path = os.path.join(local_extracted_archive_paths[i], path) if local_extracted_archive_paths else path result["audio"] = {"path": path, "bytes": file.read()} result["path"] = path yield path, result
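

if __name__ == "__main__":
    # Example usage: a minimal sketch, not part of the loading script itself.
    # It assumes this file is saved as "nena_speech.py" next to the
    # audio/{dialect}/{split}.tar and transcript/{dialect}/{split}.tsv files
    # referenced by _BASE_URL; the script path, config name, and split below
    # are illustrative assumptions, not fixed by the dataset.
    from datasets import load_dataset

    nena = load_dataset("nena_speech.py", "curmi", split="train")
    print(nena)
    print(nena[0]["transcription"])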