File size: 2,428 Bytes
6edde48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
##
import os
import pandas as pd
from datasets import GeneratorBasedBuilder, DatasetInfo, SplitGenerator, Split, Features, Value, Audio, Version

_URLS = {"train": "https://huggingface.co/datasets/aburnazy/hy_wiki_openai_tts/resolve/main/data/dataset.tar.gz"}

class HyAsrGrqaser(GeneratorBasedBuilder):
    """Armenian audio-transcription dataset builder.

    Downloads a metadata CSV and a tar.gz archive of audio files from the
    aburnazy/hy_wiki_openai_tts repository on the Hugging Face Hub and
    yields (audio, sentence) examples for the TRAIN split.
    """

    VERSION = Version("1.0.0")

    def _info(self):
        """Return the dataset metadata: features and supervised keys."""
        return DatasetInfo(
            description="This dataset contains Armenian speech and transcriptions.",
            features=Features({
                'audio': Audio(sampling_rate=16_000),  # Adjust the sampling rate as needed
                'sentence': Value('string')
            }),
            supervised_keys=("audio", "sentence"),
        )

    def _split_generators(self, dl_manager):
        """Download metadata.csv and the audio archive; return SplitGenerators.

        Args:
            dl_manager: the `datasets` download manager, used to fetch and
                cache the remote files.

        Returns:
            A single-element list with the TRAIN split generator.
        """
        metadata_path = dl_manager.download_and_extract(
            "https://huggingface.co/datasets/aburnazy/hy_wiki_openai_tts/resolve/main/metadata.csv")
        # _URLS is a dict, so download_and_extract returns a dict keyed the
        # same way ("train" -> extracted archive directory).
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"data_dir": data_dir['train'], "metadata_path": metadata_path},
            ),
        ]

    def _generate_examples(self, data_dir, metadata_path):
        """Yield (index, example) pairs described by metadata.csv.

        Args:
            data_dir: directory the audio archive was extracted into.
            metadata_path: path to the downloaded metadata.csv; each row is
                expected to have 'file_name' (audio path relative to
                `data_dir`) and 'transcription' columns.

        Yields:
            (row index, {'audio': {'path': ...}, 'sentence': ...}) tuples;
            the Audio feature decodes the file lazily from its path.
        """
        metadata = pd.read_csv(metadata_path)
        for idx, row in metadata.iterrows():
            yield idx, {
                'audio': {'path': os.path.join(data_dir, row['file_name'])},
                'sentence': row['transcription']
            }

# Testing the dataset locally
# if __name__ == "__main__":
#     from datasets import load_dataset
#     dataset = load_dataset("C:\\Projects\\aeneas\\hy_asr_grqaser\\hy_asr_grqaser.py")
#     print(dataset["train"][0])
##
# from datasets import load_dataset
#
# dataset = load_dataset("aburnazy/hy_asr_grqaser")
# print('------------------')
# print(dataset["train"][0])