# rakeffet/rakeffet.py — uploaded by izzy-lazerson (commit 7c15b5c, "Upload 3 files")
"""Rakeffet dataset."""
import os
import datasets
from datasets import load_dataset
from datasets.tasks import AutomaticSpeechRecognition
# BibTeX citation surfaced through DatasetInfo below.
_CITATION = """\
@inproceedings{Zandie2021RakeffetAC,
title={Rakeffet AI},
author={Yisroel Lazerson},
booktitle={Cooolio},
year={2022}
}
"""
# Short human-readable description used in DatasetInfo.
_DESCRIPTION = "Rakeffet is cool."
# Homepage reported in DatasetInfo.
# NOTE(review): "google.com" looks like a placeholder — confirm the real homepage.
_URL = "google.com"
# Name of the single builder config.
_NAME = "rakeffet"
# Per-split download URLs for the .tar.gz archives hosted on the Hugging Face Hub.
_DL_URLS = {
"dev": "https://huggingface.co/datasets/izzy-lazerson/rakeffet/resolve/main/data/dev.tar.gz",
"test": "https://huggingface.co/datasets/izzy-lazerson/rakeffet/resolve/main/data/test.tar.gz",
"train": "https://huggingface.co/datasets/izzy-lazerson/rakeffet/resolve/main/data/train.tar.gz"
}
class RakeffetConfig(datasets.BuilderConfig):
    """BuilderConfig for the Rakeffet dataset."""

    def __init__(self, **kwargs):
        """
        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``
                (e.g. ``name``, ``data_dir``, ``description``).
        """
        # Pin the dataset version; everything else is supplied by the caller.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
class Rakeffet(datasets.GeneratorBasedBuilder):
    """Rakeffet ASR dataset: per-split .tar.gz archives of .mp3 files plus a
    .csv transcript file (``id,text`` lines)."""

    BUILDER_CONFIGS = [
        RakeffetConfig(
            name=_NAME,
        )
    ]

    def _info(self):
        """Return DatasetInfo: features, citation, homepage and the ASR task template."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                }
            ),
            # NOTE(review): supervised_keys pairs "id" with "text"; for speech
            # recognition the input is usually "audio" — confirm this is intended.
            supervised_keys=("id", "text"),
            homepage=_URL,
            citation=_CITATION,
            task_templates=[
                AutomaticSpeechRecognition(
                    audio_column="audio",
                    transcription_column="text",
                )
            ],
        )

    def _split_generators(self, dl_manager):
        """Download the three archives and build one SplitGenerator per split.

        In non-streaming mode the archives are also extracted locally so that
        examples can reference real files on disk; in streaming mode
        ``local_extracted_archive`` entries are None and audio is served from
        the archive bytes.
        """
        archive_path = dl_manager.download(_DL_URLS)
        local_extracted_archive = (
            dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
        )
        # One generator per split; keys must match _DL_URLS. Order preserves the
        # original train, validation, test ordering.
        split_names = {
            "train": datasets.Split.TRAIN,
            "dev": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive.get(key),
                    "files": dl_manager.iter_archive(archive_path[key]),
                },
            )
            for key, split in split_names.items()
        ]

    def _generate_examples(self, files, local_extracted_archive):
        """Yield ``(key, example)`` pairs for one split.

        Args:
            files: iterator of ``(path, file_obj)`` pairs from
                ``dl_manager.iter_archive`` (bytes mode).
            local_extracted_archive: root directory of the locally extracted
                archive, or None when streaming.
        """
        audio_data = {}
        transcripts = {}
        paths = {}
        for path, f in files:
            if path.endswith(".mp3"):
                id_ = path.split("/")[-1][: -len(".mp3")]
                audio_data[id_] = f.read()
                # Bug fix: when streaming, local_extracted_archive is None and
                # os.path.join(None, path) raises TypeError — fall back to the
                # archive-relative path in that case.
                paths[id_] = (
                    os.path.join(local_extracted_archive, path)
                    if local_extracted_archive
                    else path
                )
            elif path.endswith(".csv"):
                for line in f:
                    # Bug fix: split only at the FIRST comma so transcripts that
                    # themselves contain commas are not truncated.
                    id_, _, text = line.decode("utf-8").partition(",")
                    transcripts[id_] = text.strip()
        for key, id_ in enumerate(transcripts):
            yield key, {
                "id": id_,
                "audio": {"bytes": audio_data[id_], "path": paths[id_]},
                "text": transcripts[id_],
            }