Upload folder using huggingface_hub

- .gitattributes +2 -0
- README.md +2 -0
- hy_wiki_openai_tts.py +63 -0
- metadata.csv +0 -0
- requirements.txt +3 -0
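The commit message indicates the files were pushed with huggingface_hub's `upload_folder`. A minimal sketch of how such an upload might look; the repo id is taken from the URLs in the loading script below, while the local folder path is a placeholder:

```python
from huggingface_hub import HfApi

api = HfApi()  # assumes you are authenticated via `huggingface-cli login` or HF_TOKEN
api.upload_folder(
    folder_path="./hy_wiki_openai_tts",  # hypothetical local path
    repo_id="aburnazy/hy_wiki_openai_tts",
    repo_type="dataset",
    commit_message="Upload folder using huggingface_hub",
)
```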
.gitattributes
CHANGED
@@ -53,3 +53,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+data/* filter=lfs diff=lfs merge=lfs -text
+*.tar.gz filter=lfs diff=lfs merge=lfs -text
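The two added patterns route everything under data/ and any *.tar.gz through Git LFS, so the audio archive referenced by the loading script below is stored as an LFS object. A sketch of fetching it directly, assuming the data/dataset.tar.gz path used in the script:

```python
from huggingface_hub import hf_hub_download

# Downloads the LFS-tracked archive into the local HF cache and returns its path.
archive_path = hf_hub_download(
    repo_id="aburnazy/hy_wiki_openai_tts",
    repo_type="dataset",
    filename="data/dataset.tar.gz",
)
print(archive_path)
```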
README.md
ADDED
@@ -0,0 +1,2 @@
+# hy_wiki_openai_tts
+Hugging Face dataset pairing transcripts from Armenian Wikipedia pages with audio generated by OpenAI TTS
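A minimal usage sketch to go with the README; the repo id comes from the URLs in the loading script, and the split name and columns come from the script itself (recent versions of datasets require trust_remote_code for script-based datasets):

```python
from datasets import load_dataset

# Runs the hy_wiki_openai_tts.py loading script hosted in the repo.
dataset = load_dataset("aburnazy/hy_wiki_openai_tts", trust_remote_code=True)
example = dataset["train"][0]
print(example["sentence"])
print(example["audio"]["sampling_rate"])  # 16000 per the script's Audio feature
```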
hy_wiki_openai_tts.py
ADDED
@@ -0,0 +1,63 @@
+##
+import os
+import pandas as pd
+from datasets import GeneratorBasedBuilder, DatasetInfo, SplitGenerator, Split, Features, Value, Audio, Version
+
+_URLS = {"train": "https://huggingface.co/datasets/aburnazy/hy_wiki_openai_tts/resolve/main/data/dataset.tar.gz"}
+
+class HyAsrGrqaser(GeneratorBasedBuilder):
+    """Armenian Audio-Transcription Dataset"""
+
+    VERSION = Version("1.0.0")
+
+    def _info(self):
+        return DatasetInfo(
+            description="This dataset contains Armenian speech and transcriptions.",
+            features=Features({
+                'audio': Audio(sampling_rate=16_000),  # Adjust the sampling rate as needed
+                'sentence': Value('string')
+            }),
+            supervised_keys=("audio", "sentence"),
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        # Assuming the script is in the root of the project structure
+        # metadata_path = os.path.join(os.path.dirname(__file__), "metadata.csv")
+        metadata_path = dl_manager.download_and_extract(
+            "https://huggingface.co/datasets/aburnazy/hy_wiki_openai_tts/resolve/main/metadata.csv")
+        data_dir = dl_manager.download_and_extract(_URLS)
+        print(f"----------data_dir: {data_dir}, \n----------metadata_path: {metadata_path}")
+        return [
+            SplitGenerator(
+                name=Split.TRAIN,
+                gen_kwargs={"data_dir": data_dir['train'], "metadata_path": metadata_path}
+            ),
+        ]
+
+    def _generate_examples(self, data_dir, metadata_path):
+        """Yields examples."""
+        print(f"data_dir: {data_dir}, metadata_path: {metadata_path}")
+        # Load metadata.csv
+        metadata = pd.read_csv(metadata_path)
+
+        # Generate examples, keyed by row index
+        for idx, row in metadata.iterrows():
+            file_path = os.path.join(data_dir, row['file_name'])
+            transcription = row['transcription']
+            yield idx, {
+                'audio': {'path': file_path},
+                'sentence': transcription
+            }
+
+# Testing the dataset locally
+# if __name__ == "__main__":
+#     from datasets import load_dataset
+#     dataset = load_dataset("C:\\Projects\\aeneas\\hy_asr_grqaser\\hy_asr_grqaser.py")
+#     print(dataset["train"][0])
+##
+# from datasets import load_dataset
+#
+# dataset = load_dataset("aburnazy/hy_asr_grqaser")
+# print('------------------')
+# print(dataset["train"][0])
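The script assumes metadata.csv has file_name and transcription columns and that each file_name resolves relative to the extracted archive root. A small sanity-check sketch under those assumptions, using hypothetical local copies of the two artifacts (note that, depending on how the tarball was built, member names may carry a leading directory prefix):

```python
import tarfile

import pandas as pd

# Hypothetical local copies of the two files the script downloads.
metadata = pd.read_csv("metadata.csv")
with tarfile.open("data/dataset.tar.gz") as tar:
    members = {m.name for m in tar.getmembers()}

# Every file referenced by the metadata should exist in the archive.
missing = [f for f in metadata["file_name"] if f not in members]
print(f"{len(missing)} referenced files missing from the archive")
```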
metadata.csv
ADDED
The diff for this file is too large to render; see the raw file.
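Since the CSV is too large to render here, a quick way to inspect it locally; the repo id comes from the loading script, and the expected column names are assumptions based on how the script reads them:

```python
import pandas as pd
from huggingface_hub import hf_hub_download

csv_path = hf_hub_download(
    repo_id="aburnazy/hy_wiki_openai_tts",
    repo_type="dataset",
    filename="metadata.csv",
)
df = pd.read_csv(csv_path)
print(df.columns.tolist())  # expected: ['file_name', 'transcription']
print(df.head())
```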
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+pandas
+librosa
+datasets