"""SQUAD: The Stanford Question Answering Dataset."""
import os
import json
import datasets
from datasets.tasks import AutomaticSpeechRecognition
_DATA_URL = "https://huggingface.co/datasets/aymanelmar/joha/resolve/main/joha.tar.gz"
_CITATION = """\
@inproceedings{commonvoice:2020,
author = {Ardila, R. and Branson, M. and Davis, K. and Henretty, M. and Kohler, M. and Meyer, J. and Morais, R. and Saunders, L. and Tyers, F. M. and Weber, G.},
title = {Common Voice: A Massively-Multilingual Speech Corpus},
booktitle = {Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)},
pages = {4211--4215},
year = 2020
}
"""
_DESCRIPTION = """\
Common Voice is Mozilla's initiative to help teach machines how real people speak.
The dataset currently consists of 7,335 validated hours of speech in 60 languages, but we’re always adding more voices and languages.
"""


class JohaDataset(datasets.GeneratorBasedBuilder):
    def _info(self):
        features = datasets.Features(
            {
                "file_name": datasets.Value("string"),
                "words": datasets.Value("string"),
                "duration": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=48_000),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            citation=_CITATION,
            task_templates=[AutomaticSpeechRecognition(audio_file_path_column="audio", transcription_column="words")],
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download the TAR archive that contains the audio files:
        archive_path = dl_manager.download(_DATA_URL)
        # Locate the data via its path within the archive (everything at the root):
        path_to_data = "."
        path_to_clips = path_to_data
        metadata_filepaths = {
            split: "/".join([path_to_data, f"{split}.tsv"])
            for split in ["train", "test", "validation"]
        }
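        # i.e. {"train": "./train.tsv", "test": "./test.tsv", "validation": "./validation.tsv"}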
        # In non-streaming mode, also extract the archive locally so that examples
        # can point at real local audio files; in streaming mode there is nothing
        # to extract, so we keep None instead:
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else None
        # To access the audio data from the TAR archive, we have to use
        # dl_manager.iter_archive: dl_manager.download_and_extract cannot stream
        # TAR archives, because their members have to be read one by one.
        # iter_archive returns an iterable of (path_within_archive, file_obj)
        # pairs, one for every file in the TAR archive.
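        # For example, iterating the archive yields pairs such as
        #   ("./train.tsv", <file object>), ("./some_clip.mp3", <file object>), ...
        # (whether member names carry the "./" prefix depends on how the TAR
        # archive was created; the "." prefix used above assumes they do).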
        split_names = {
            "train": datasets.Split.TRAIN,
            "test": datasets.Split.TEST,
            "validation": datasets.Split.VALIDATION,
        }
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive,
                    # Two independent passes over the same archive: the first to
                    # read the metadata TSV, the second to read the audio files.
                    "archive_iterator": dl_manager.iter_archive(archive_path),
                    "archive_iterator2": dl_manager.iter_archive(archive_path),
                    "metadata_filepath": metadata_filepaths[split],
                    "path_to_clips": path_to_clips,
                },
            )
            for split, split_name in split_names.items()
        ]

    def _generate_examples(self, local_extracted_archive, archive_iterator, archive_iterator2, metadata_filepath, path_to_clips):
        """Yields examples."""
        data_fields = list(self._info().features.keys())
        # "audio" is a feature of the examples but not a column of the TSV files
        data_fields.remove("audio")
        path_idx = data_fields.index("file_name")
        all_field_values = {}
        metadata_found = False
        # First pass over the archive: locate and parse the metadata TSV file
        for path, f in archive_iterator:
            if path == metadata_filepath:
                metadata_found = True
                lines = f.readlines()
                headline = lines[0].decode("utf-8")
                column_names = headline.strip().split("\t")
                assert (
                    column_names == data_fields
                ), f"The file should have {data_fields} as column names, but has {column_names}"
                for line in lines[1:]:
                    field_values = line.decode("utf-8").strip().split("\t")
                    # set the full in-archive path for the mp3 audio file
                    audio_path = "/".join([path_to_clips, field_values[path_idx]])
                    all_field_values[audio_path] = field_values
                break
        if not metadata_found:
            raise FileNotFoundError(f"Metadata file {metadata_filepath} not found in the TAR archive")
        # Second pass over the archive: read the audio bytes for every known clip
        for path, f in archive_iterator2:
            if path in all_field_values:
                # retrieve the metadata corresponding to this audio file
                field_values = all_field_values[path]
                result = {key: value for key, value in zip(data_fields, field_values)}
                # keep a local file path only when the archive was extracted
                # locally; in streaming mode the path stays None
                local_path = os.path.join(local_extracted_archive, path) if local_extracted_archive else None
                result["audio"] = {"path": local_path, "bytes": f.read()}
                yield path, result
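

if __name__ == "__main__":
    # Minimal smoke test; a sketch, not part of the loading script itself. It
    # assumes the archive URL above is reachable, that an mp3-capable audio
    # backend is installed for the Audio feature, and that your version of
    # `datasets` still supports script-based datasets (recent versions may
    # require trust_remote_code=True). Streaming avoids a full local download.
    dataset = datasets.load_dataset(__file__, split="train", streaming=True)
    example = next(iter(dataset))
    print(example["file_name"], example["duration"], example["words"])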