# hy_wiki_openai_tts.py: Hugging Face dataset loading script for aburnazy/hy_wiki_openai_tts
import os
import pandas as pd
from datasets import GeneratorBasedBuilder, DatasetInfo, SplitGenerator, Split, Features, Value, Audio, Version
_URLS = {"train": "https://huggingface.co/datasets/aburnazy/hy_wiki_openai_tts/resolve/main/data/dataset.tar.gz"}
class HyAsrGrqaser(GeneratorBasedBuilder):
"""Armenian Audio-Transcription Dataset"""
VERSION = Version("1.0.0")
def _info(self):
return DatasetInfo(
description="This dataset contains Armenian speech and transcriptions.",
features=Features({
                'audio': Audio(sampling_rate=16_000),  # audio is resampled to 16 kHz when decoded
'sentence': Value('string')
}),
supervised_keys=("audio", "sentence"),
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
        # Download the metadata CSV and the audio archive from the Hub.
        metadata_path = dl_manager.download_and_extract(
            "https://huggingface.co/datasets/aburnazy/hy_wiki_openai_tts/resolve/main/metadata.csv")
        data_dir = dl_manager.download_and_extract(_URLS)
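        # download_and_extract applied to a dict returns local paths keyed like _URLS,
        # so data_dir['train'] below is the directory holding the extracted dataset.tar.gz contents.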
return [
SplitGenerator(
name=Split.TRAIN,
gen_kwargs={"data_dir": data_dir['train'], "metadata_path": metadata_path}
),
]
    def _generate_examples(self, data_dir, metadata_path):
        """Yields examples as (key, example) tuples."""
        # Load metadata.csv into a DataFrame.
        metadata = pd.read_csv(metadata_path)
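        # Assumed metadata.csv schema (not validated here): a 'file_name' column with paths
        # relative to the extracted archive root and a 'transcription' column with the target
        # sentence; any extra columns are ignored.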
# Generate examples
for idx, row in metadata.iterrows():
file_path = os.path.join(data_dir, row['file_name'])
transcription = row['transcription']
yield idx, {
'audio': {'path': file_path},
'sentence': transcription
}
# Testing the dataset locally
# if __name__ == "__main__":
# from datasets import load_dataset
# dataset = load_dataset("C:\\Projects\\aeneas\\hy_asr_grqaser\\hy_asr_grqaser.py")
# print(dataset["train"][0])
# Testing the published dataset from the Hub:
# from datasets import load_dataset
# dataset = load_dataset("aburnazy/hy_wiki_openai_tts")
# print(dataset["train"][0])
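
# A minimal, portable version of the checks above (a sketch): it assumes a `datasets`
# release that still supports loading-script builders and network access to the Hub files
# referenced in this script. The __main__ guard keeps it from running when the datasets
# library imports this module as a builder.
if __name__ == "__main__":
    from datasets import load_dataset

    # Build the dataset from this script file and print one training example.
    dataset = load_dataset(__file__)
    print(dataset["train"][0])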