izzy-lazerson committed on
Commit 7c15b5c
1 Parent(s): efcaf4f

Upload 3 files

Files changed (3)
  1. README.md +25 -0
  2. dataset_infos.json +23 -0
  3. rakeffet.py +137 -0
README.md ADDED
@@ -0,0 +1,25 @@
+ ---
+ pretty_name: Rakeffet
+ annotations_creators:
+ - expert-generated
+ language_creators:
+ - expert-generated
+ language:
+ - en
+ license:
+ - cc-by-nc-4.0
+ multilinguality:
+ - monolingual
+ source_datasets:
+ - original
+ task_categories:
+ - automatic-speech-recognition
+ - audio-classification
+ - speech-synthesis
+ ---
+
+ # Dataset Card for Rakeffet
+
+
+
+
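
With the card metadata and loading script in place, the dataset can be pulled from the Hub by its repository id. A minimal usage sketch, assuming the izzy-lazerson/rakeffet repository and the data archives referenced in rakeffet.py are reachable, and a datasets version that still executes loading scripts:

from datasets import load_dataset

# Downloads and extracts train.tar.gz as listed in rakeffet.py's _DL_URLS.
ds = load_dataset("izzy-lazerson/rakeffet", split="train")

# Each row carries an id, a 16 kHz audio dict, and its transcript, matching the
# automatic-speech-recognition task category declared above.
row = ds[0]
print(row["id"], row["text"])
print(row["audio"]["sampling_rate"])  # 16000
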
dataset_infos.json ADDED
@@ -0,0 +1,23 @@
+ {
+     "description": "rakeffet",
+     "homepage": "google.com",
+     "supervised_keys": {
+         "input": "id",
+         "output": "text"
+     },
+     "task_templates": [
+         {
+             "task": "automatic-speech-recognition",
+             "audio_column": "audio",
+             "transcription_column": "text"
+         }
+     ],
+     "builder_name": "rakeffet",
+     "version": {
+         "version_str": "1.0.0",
+         "description": "",
+         "major": 1,
+         "minor": 0,
+         "patch": 0
+     }
+ }
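
The version block is the serialized form of datasets.Version, whose numeric components are parsed from the version string, so major/minor/patch follow directly from "1.0.0". An illustrative check (not part of the commit):

import datasets

version = datasets.Version("1.0.0", "")
# The string is parsed into its numeric parts on construction.
print(version.version_str)                           # 1.0.0
print(version.major, version.minor, version.patch)   # 1 0 0
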
rakeffet.py ADDED
@@ -0,0 +1,137 @@
+ """Rakeffet dataset."""
+
+
+ import os
+
+ import datasets
+ from datasets.tasks import AutomaticSpeechRecognition
+
+
+ _CITATION = """\
+ @inproceedings{Zandie2021RakeffetAC,
+     title={Rakeffet AI},
+     author={Yisroel Lazerson},
+     booktitle={Cooolio},
+     year={2022}
+ }
+ """
+
+ _DESCRIPTION = "Rakeffet is cool."
+ _URL = "google.com"
+ _NAME = "rakeffet"
+
+ _DL_URLS = {
+     "dev": "https://huggingface.co/datasets/izzy-lazerson/rakeffet/resolve/main/data/dev.tar.gz",
+     "test": "https://huggingface.co/datasets/izzy-lazerson/rakeffet/resolve/main/data/test.tar.gz",
+     "train": "https://huggingface.co/datasets/izzy-lazerson/rakeffet/resolve/main/data/train.tar.gz",
+ }
+
+
+ class RakeffetConfig(datasets.BuilderConfig):
+     """BuilderConfig for Rakeffet."""
+
+     def __init__(self, **kwargs):
+         """
+         Args:
+             data_dir: `string`, the path to the folder containing the files in the
+                 downloaded .tar
+             citation: `string`, citation for the dataset
+             url: `string`, url for information about the dataset
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
+
+
+ class Rakeffet(datasets.GeneratorBasedBuilder):
+     """Rakeffet dataset."""
+
+     BUILDER_CONFIGS = [
+         RakeffetConfig(
+             name=_NAME,
+         )
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "audio": datasets.Audio(sampling_rate=16_000),
+                     "text": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=("id", "text"),
+             homepage=_URL,
+             citation=_CITATION,
+             task_templates=[
+                 AutomaticSpeechRecognition(
+                     audio_column="audio",
+                     transcription_column="text",
+                 )
+             ],
+         )
+
+     def _split_generators(self, dl_manager):
+         archive_path = dl_manager.download(_DL_URLS)
+         # (Optional) In non-streaming mode, extract the archives locally so that
+         # examples can point at actual local audio files:
+         local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
+
+         train_splits = [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "local_extracted_archive": local_extracted_archive.get("train"),
+                     "files": dl_manager.iter_archive(archive_path["train"]),
+                 },
+             )
+         ]
+         dev_splits = [
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "local_extracted_archive": local_extracted_archive.get("dev"),
+                     "files": dl_manager.iter_archive(archive_path["dev"]),
+                 },
+             )
+         ]
+         test_splits = [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "local_extracted_archive": local_extracted_archive.get("test"),
+                     "files": dl_manager.iter_archive(archive_path["test"]),
+                 },
+             )
+         ]
+
+         return train_splits + dev_splits + test_splits
+
+     def _generate_examples(self, files, local_extracted_archive):
+         """Generate examples from a Rakeffet archive_path."""
+         audio_data = {}
+         transcripts = {}
+         paths = {}
+         # Each archive holds .mp3 audio files plus a .csv of `id,text` rows.
+         for path, f in files:
+             if path.endswith(".mp3"):
+                 id_ = path.split("/")[-1][: -len(".mp3")]
+                 audio_data[id_] = f.read()
+                 # In streaming mode there is no extracted archive, so fall back
+                 # to the in-archive path instead of joining with None.
+                 paths[id_] = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
+             elif path.endswith(".csv"):
+                 for line in f:
+                     line_fields = line.decode("utf-8").split(",")
+                     id_ = line_fields[0]
+                     transcripts[id_] = line_fields[1].strip()
+
+         for key, id_ in enumerate(transcripts):
+             yield key, {
+                 "audio": {"bytes": audio_data[id_], "path": paths[id_]},
+                 "text": transcripts[id_],
+                 "id": id_,
+             }