Multilinguality: multilingual
Size Categories: 100K<n<1M
Language Creators: machine-generated
Annotations Creators: expert-generated
Source Datasets: original
ArXiv: 2208.11761
License: cc0-1.0
""" Kathbath Dataset"""
import csv
import os
import tarfile
import datasets
from datasets.utils.py_utils import size_str
from .languages import LANGUAGES
from .release_stats import STATS
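# `.languages` and `.release_stats` are small companion modules distributed
# alongside this script (not shown here). A minimal sketch of the shape this
# script expects, with illustrative values only:
#
#     # languages.py
#     LANGUAGES = {"hindi": "Hindi", "bengali": "Bengali"}
#
#     # release_stats.py
#     STATS = {
#         "version": "1.0.0",
#         "date": "2022-08-24",
#         "locales": {"hindi": {"totalHrs": "150.0"}, "bengali": {"totalHrs": "100.0"}},
#     }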
_CITATION = """\
@misc{https://doi.org/10.48550/arxiv.2208.11761,
doi = {10.48550/ARXIV.2208.11761},
url = {https://arxiv.org/abs/2208.11761},
author = {Javed, Tahir and Bhogale, Kaushal Santosh and Raman, Abhigyan and Kunchukuttan, Anoop and Kumar, Pratyush and Khapra, Mitesh M.},
title = {IndicSUPERB: A Speech Processing Universal Performance Benchmark for Indian languages},
publisher = {arXiv},
year = {2022},
copyright = {arXiv.org perpetual, non-exclusive license}
}
"""
_HOMEPAGE = "https://ai4bharat.iitm.ac.in/indic-superb/"
_LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
_DATA_URL = "https://huggingface.co/datasets/ai4bharat/kathbath/resolve/main/data"
class KathbathConfig(datasets.BuilderConfig):
    """BuilderConfig for Kathbath."""

    def __init__(self, name, version, **kwargs):
        self.language = kwargs.pop("language", None)
        self.release_date = kwargs.pop("release_date", None)
        self.num_clips = kwargs.pop("num_clips", None)
        self.num_speakers = kwargs.pop("num_speakers", None)
        self.total_hr = kwargs.pop("total_hr", None)
        self.size_bytes = kwargs.pop("size_bytes", None)
        self.size_human = size_str(self.size_bytes)
        description = (
            f"Kathbath speech-to-text dataset in {self.language}, released on {self.release_date}. "
            f"The dataset comprises {self.total_hr} hours of transcribed speech data."
        )
        super(KathbathConfig, self).__init__(
            name=name,
            version=datasets.Version(version),
            description=description,
            **kwargs,
        )
class Kathbath(datasets.GeneratorBasedBuilder):
    DEFAULT_CONFIG_NAME = "_all_"

    BUILDER_CONFIGS = [
        KathbathConfig(
            name=lang,
            version=STATS["version"],
            language=LANGUAGES[lang],
            release_date=STATS["date"],
            # num_clips=lang_stats["clips"],
            # num_speakers=lang_stats["users"],
            total_hr=float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None,
            # size_bytes=int(lang_stats["size"]) if lang_stats["size"] else None,
        )
        for lang, lang_stats in STATS["locales"].items()
    ]

    def _info(self):
        total_languages = len(STATS["locales"])
        total_hours = self.config.total_hr
        description = (
            "Kathbath is a transcribed speech dataset for Indian languages, "
            "released as part of the IndicSUPERB benchmark. "
            f"This configuration contains {total_hours} hours of speech; "
            f"the full dataset covers {total_languages} languages."
        )
        features = datasets.Features(
            {
                "path": datasets.Value("string"),
                "language": datasets.Value("string"),
                "speaker": datasets.Value("string"),
                "sentence": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=16000),
            }
        )
        return datasets.DatasetInfo(
            description=description,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            version=self.config.version,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_manager.download_config.ignore_url_params = True
        audio_path = {}
        local_extracted_archive = {}
        metadata_path = {}
        # The known- and unknown-speaker test sets are kept as separate named
        # splits; mapping both to datasets.Split.TEST would produce two splits
        # with the same name.
        split_type = {
            "train": datasets.Split.TRAIN,
            "valid": datasets.Split.VALIDATION,
            "test_unknown": datasets.NamedSplit("test_unknown"),
            "test_known": datasets.NamedSplit("test_known"),
        }
        for split in split_type:
            if split == "train":
                # The train audio is sharded into several tar parts on the Hub;
                # the parts only form a valid tar archive once concatenated.
                part_urls = [
                    f"{_DATA_URL}/audio_{split}.tar.partaa",
                    f"{_DATA_URL}/audio_{split}.tar.partab",
                    f"{_DATA_URL}/audio_{split}.tar.partac",
                ]
                parts = dl_manager.download(part_urls)
                if dl_manager.is_streaming:
                    # Streaming cannot concatenate the shards, so the train
                    # split is not iterable in streaming mode.
                    audio_path[split] = parts
                    local_extracted_archive[split] = None
                else:
                    full_tar = parts[0] + ".full"
                    if not os.path.exists(full_tar):
                        with open(full_tar, "wb") as out_file:
                            for part in parts:
                                with open(part, "rb") as part_file:
                                    shutil.copyfileobj(part_file, out_file)
                    audio_path[split] = full_tar
                    local_extracted_archive[split] = dl_manager.extract(full_tar)
            else:
                audio_path[split] = dl_manager.download(f"{_DATA_URL}/audio_{split}.tar")
                local_extracted_archive[split] = (
                    dl_manager.extract(audio_path[split]) if not dl_manager.is_streaming else None
                )
            metadata_path[split] = dl_manager.download(f"{_DATA_URL}/metata_{split}.tsv")
        path_to_clips = "kb_data_clean_m4a"
        return [
            datasets.SplitGenerator(
                name=split_type[split],
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive[split],
                    "audio_files": dl_manager.iter_archive(audio_path[split]),
                    "metadata_path": metadata_path[split],
                    "path_to_clips": path_to_clips,
                },
            )
            for split in split_type
        ]

    def _generate_examples(
        self,
        local_extracted_archive,
        audio_files,
        metadata_path,
        path_to_clips,
    ):
        """Yields examples."""
        data_fields = list(self._info().features.keys())
        metadata = {}
        # The metadata TSV must provide at least `path` and `language` columns;
        # any other feature column that is missing is filled with an empty string.
        with open(metadata_path, "r", encoding="utf-8") as meta_file:
            reader = csv.DictReader(meta_file, delimiter="\t")
            for row in reader:
                if self.config.name == "_all_" or self.config.name == row["language"]:
                    row["path"] = os.path.join(path_to_clips, row["path"])
                    # if data is incomplete, fill with empty values
                    for field in data_fields:
                        if field not in row:
                            row[field] = ""
                    metadata[row["path"]] = row
        id_ = 0
        # `audio_files` comes from `dl_manager.iter_archive`, which yields
        # (path-inside-archive, file-object) pairs.
        for path, audio_file in audio_files:
            if path in metadata:
                result = dict(metadata[path])
                # set the audio feature and the path to the extracted file
                path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
                result["audio"] = {"path": path, "bytes": audio_file.read()}
                result["path"] = path
                yield id_, result
                id_ += 1
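
# Usage (a minimal sketch; "hindi" is an assumed config name -- the valid
# names are the keys of LANGUAGES in languages.py):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("ai4bharat/kathbath", "hindi", split="validation")
#     sample = ds[0]
#     print(sample["sentence"], sample["audio"]["sampling_rate"])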