Datasets:
Tasks:
Automatic Speech Recognition
Formats:
webdataset
Languages:
Uzbek
Size:
10K - 100K
Tags:
audio
License:
apache-2.0
# Standard library
import csv
import os
import tarfile

# Third-party
import datasets
from datasets.utils.py_utils import size_str
from tqdm import tqdm
class STTUzbekConfig(datasets.BuilderConfig):
    """BuilderConfig for the STT Uzbek Dataset."""

    #: Human-readable summary passed to BuilderConfig unless overridden.
    _DESCRIPTION_PARTS = (
        "Speech-to-Text dataset for the Uzbek language. ",
        "The dataset contains audio files stored in different folders: ",
        "`wavs`, `uz_other_dataset`, `uz_validated_dataset`, `uz_train_dataset`. ",
        "The corresponding transcriptions are provided in the `metadata.csv` file.",
    )

    def __init__(self, **kwargs):
        """Forward all keyword arguments to BuilderConfig with a fixed description."""
        super(STTUzbekConfig, self).__init__(
            description="".join(self._DESCRIPTION_PARTS), **kwargs
        )
class STTUzbek(datasets.GeneratorBasedBuilder):
    """Generator-based builder for the Uzbek speech-to-text dataset.

    Audio lives either as loose files under ``audio/wavs`` or packed inside
    tar archives under ``audio/uz_other_dataset``, ``audio/uz_validated_dataset``
    and ``audio/uz_train_dataset``; transcriptions come from ``metadata.csv``.
    """

    DEFAULT_WRITER_BATCH_SIZE = 1000

    BUILDER_CONFIGS = [
        STTUzbekConfig(
            name="stt_uzbek",
            version=datasets.Version("1.0.0"),
        ),
    ]

    def _info(self):
        """Return dataset metadata: features, homepage, license and citation."""
        features = datasets.Features(
            {
                "file_name": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=48_000),
                "transcription": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=self.config.description,
            features=features,
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/Beehzod/STT_uz",
            license="Apache License 2.0",
            citation="""@misc{uzbek_stt_dataset,
            author = {Beehzod},
            title = {Uzbek Speech-to-Text Dataset},
            year = {2024},
            howpublished = {https://huggingface.co/datasets/Beehzod/STT_uz},
            note = {Dataset for Uzbek language speech-to-text tasks.}
            }""",
            version=self.config.version,
        )

    def _split_generators(self, dl_manager):
        """Declare a single TRAIN split.

        Paths are relative to the repository root — adjust them to your setup.
        ``dl_manager`` is unused because nothing is downloaded.
        """
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "wavs_dir": "audio/wavs",
                    "uz_other_dir": "audio/uz_other_dataset",
                    "uz_validated_dir": "audio/uz_validated_dataset",
                    "uz_train_dir": "audio/uz_train_dataset",
                    "metadata_file": "metadata.csv",
                },
            ),
        ]

    def _generate_examples(self, wavs_dir, uz_other_dir, uz_validated_dir, uz_train_dir, metadata_file):
        """Yield ``(key, example)`` pairs by joining metadata rows with audio.

        Loose files under ``audio/wavs`` are referenced by filesystem path;
        tar-packed audio is read into memory as ``{"path", "bytes"}``.

        Raises:
            ValueError: if a metadata row has an unrecognized path prefix.
        """
        # Map each metadata path prefix to the directory holding its tars.
        tar_dirs = {
            "audio/uz_other_dataset": uz_other_dir,
            "audio/uz_validated_dataset": uz_validated_dir,
            "audio/uz_train_dataset": uz_train_dir,
        }
        with open(metadata_file, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for row in tqdm(reader, desc="Processing metadata..."):
                file_name = row["file_name"]
                transcription = row["transcription"]
                # Determine the file's location based on the path prefix.
                if file_name.startswith("audio/wavs"):
                    audio = os.path.join(wavs_dir, os.path.basename(file_name))
                else:
                    for prefix, tar_dir in tar_dirs.items():
                        if file_name.startswith(prefix):
                            audio = self._extract_from_tar(tar_dir, file_name)
                            break
                    else:
                        raise ValueError(f"Unknown path prefix in file_name: {file_name}")
                yield file_name, {
                    "file_name": file_name,
                    "audio": audio,
                    "transcription": transcription,
                }

    def _extract_from_tar(self, tar_dir, file_name):
        """Read one audio file out of the tar archives in ``tar_dir``.

        Uses a lazily-built, cached index of member names instead of
        re-opening and scanning every archive for every lookup (the previous
        implementation did O(archives) tar scans per audio file).

        Raises:
            FileNotFoundError: if the file is in none of the archives.
        """
        member_name = file_name.split("/")[-1]
        tar_path = self._tar_member_index(tar_dir).get(member_name)
        if tar_path is not None:
            with tarfile.open(tar_path, "r") as tar:
                extracted = tar.extractfile(member_name)
                if extracted is not None:
                    return {"path": member_name, "bytes": extracted.read()}
        raise FileNotFoundError(f"File {file_name} not found in any tar archives in {tar_dir}.")

    def _tar_member_index(self, tar_dir):
        """Return a dict mapping tar member name -> archive path for ``tar_dir``.

        The index is built once per directory and cached on the instance
        (lazily, so no ``__init__`` override is needed).
        """
        cache = getattr(self, "_tar_index_cache", None)
        if cache is None:
            cache = self._tar_index_cache = {}
        if tar_dir not in cache:
            index = {}
            for entry in sorted(os.listdir(tar_dir)):
                tar_path = os.path.join(tar_dir, entry)
                # Skip stray non-tar files instead of crashing with ReadError.
                if not tarfile.is_tarfile(tar_path):
                    continue
                with tarfile.open(tar_path, "r") as tar:
                    for member in tar.getmembers():
                        if member.isfile():
                            # First archive containing a name wins, matching
                            # the original first-match-in-listing behavior.
                            index.setdefault(member.name, tar_path)
            cache[tar_dir] = index
        return cache[tar_dir]