JJFrancisco committed
Commit
0ced86c
1 Parent(s): 0e6bc2c

Update test-user.py

Browse files
Files changed (1)
  1. test-user.py +238 -84
test-user.py CHANGED
@@ -1,15 +1,26 @@
 # coding=utf-8
+# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 # Lint as: python3
-"""test set"""
+"""Librispeech automatic speech recognition dataset."""
 
 
-import csv
 import os
-import json
 
 import datasets
-from datasets.utils.py_utils import size_str
-from tqdm import tqdm
+from datasets.tasks import AutomaticSpeechRecognition
 
 
 _CITATION = """\
@@ -24,50 +35,63 @@ _CITATION = """\
24
  """
25
 
26
  _DESCRIPTION = """\
27
- Lorem ipsum
 
 
28
  """
29
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
 
31
- _BASE_URL = "https://huggingface.co/datasets/gcjavi/dataviewer-test"
32
- _DATA_URL = "test.zip"
33
- _PROMPTS_URLS = {"test": "test.tsv"}
34
-
35
- logger = datasets.logging.get_logger(__name__)
36
-
37
- class TestConfig(datasets.BuilderConfig):
38
- """Lorem impsum."""
39
-
40
- def __init__(self, name, **kwargs):
41
- # self.language = kwargs.pop("language", None)
42
- # self.release_date = kwargs.pop("release_date", None)
43
- # self.num_clips = kwargs.pop("num_clips", None)
44
- # self.num_speakers = kwargs.pop("num_speakers", None)
45
- # self.validated_hr = kwargs.pop("validated_hr", None)
46
- # self.total_hr = kwargs.pop("total_hr", None)
47
- # self.size_bytes = kwargs.pop("size_bytes", None)
48
- # self.size_human = size_str(self.size_bytes)
49
- description = (
50
- f"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor "
51
- f"incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud "
52
- f"exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure "
53
- f"dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. "
54
- f"Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt "
55
- f"mollit anim id est laborum."
56
- )
57
- super(TestConfig, self).__init__(
58
- name=name,
59
- description=description,
60
- **kwargs,
61
- )
62
 
63
- class TestASR(datasets.GeneratorBasedBuilder):
64
- """Lorem ipsum."""
65
 
 
 
 
 
 
 
 
 
 
 
66
 
 
 
 
 
 
 
67
  BUILDER_CONFIGS = [
68
- TestConfig(
69
- name="test-dataset",
70
- )
71
  ]
72
 
73
  def _info(self):
@@ -75,51 +99,181 @@ class TestASR(datasets.GeneratorBasedBuilder):
         description=_DESCRIPTION,
         features=datasets.Features(
             {
-                "audio_id": datasets.Value("string"),
+                "file": datasets.Value("string"),
                 "audio": datasets.Audio(sampling_rate=16_000),
-                "sentence": datasets.Value("string")
+                "text": datasets.Value("string"),
+                "speaker_id": datasets.Value("int64"),
+                "chapter_id": datasets.Value("int64"),
+                "id": datasets.Value("string"),
             }
         ),
-        supervised_keys=None,
-        homepage=_BASE_URL,
-        citation=_CITATION
+        supervised_keys=("file", "text"),
+        homepage=_URL,
+        citation=_CITATION,
+        task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
     )
 
     def _split_generators(self, dl_manager):
-        audio_path = dl_manager.download(_DATA_URL)
-        local_extracted_archive = dl_manager.extract(audio_path) if not dl_manager.is_streaming else None
-        meta_path = dl_manager.download(_PROMPTS_URLS)
-        return [datasets.SplitGenerator(
-            name=datasets.Split.TEST,
-            gen_kwargs={
-                "meta_path": meta_path["test"],
-                "audio_files": dl_manager.iter_archive(audio_path),
-                "local_extracted_archive": local_extracted_archive,
-            }
-        )]
-
-    def _generate_examples(self, meta_path, audio_files, local_extracted_archive):
-        """Lorem ipsum."""
-        data_fields = list(self._info().features.keys())
-        metadata = {}
-        with open(meta_path, encoding="utf-8") as f:
-            next(f)
-            for row in f:
-                print(row)
-                r = row.split("\t")
-                print(r)
-                audio_id = r[0]
-                sentence = r[1]
-                metadata[audio_id] = {"audio_id": audio_id,
-                                      "sentence": sentence}
-
-        id_ = 0
-        for path, f in audio_files:
-            print(path, f)
-            _, audio_name = os.path.split(path)
-            if audio_name in metadata:
-                result = dict(metadata[audio_name])
-                path = os.path.join(local_extracted_archive, "test", path) if local_extracted_archive else path
-                result["audio"] = {"path": path, "bytes":f.read()}
-                yield id_, result
-                id_ +=1
+        archive_path = dl_manager.download(_DL_URLS[self.config.name])
+        # (Optional) In non-streaming mode, we can extract the archive locally to have actual local audio files:
+        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
+
+        if self.config.name == "clean":
+            train_splits = [
+                datasets.SplitGenerator(
+                    name="train.100",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("train.100"),
+                        "files": dl_manager.iter_archive(archive_path["train.100"]),
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name="train.360",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("train.360"),
+                        "files": dl_manager.iter_archive(archive_path["train.360"]),
+                    },
+                ),
+            ]
+            dev_splits = [
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("dev"),
+                        "files": dl_manager.iter_archive(archive_path["dev"]),
+                    },
+                )
+            ]
+            test_splits = [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("test"),
+                        "files": dl_manager.iter_archive(archive_path["test"]),
+                    },
+                )
+            ]
+        elif self.config.name == "other":
+            train_splits = [
+                datasets.SplitGenerator(
+                    name="train.500",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("train.500"),
+                        "files": dl_manager.iter_archive(archive_path["train.500"]),
+                    },
+                )
+            ]
+            dev_splits = [
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("dev"),
+                        "files": dl_manager.iter_archive(archive_path["dev"]),
+                    },
+                )
+            ]
+            test_splits = [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("test"),
+                        "files": dl_manager.iter_archive(archive_path["test"]),
+                    },
+                )
+            ]
+        elif self.config.name == "all":
+            train_splits = [
+                datasets.SplitGenerator(
+                    name="train.clean.100",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("train.clean.100"),
+                        "files": dl_manager.iter_archive(archive_path["train.clean.100"]),
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name="train.clean.360",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("train.clean.360"),
+                        "files": dl_manager.iter_archive(archive_path["train.clean.360"]),
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name="train.other.500",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("train.other.500"),
+                        "files": dl_manager.iter_archive(archive_path["train.other.500"]),
+                    },
+                ),
+            ]
+            dev_splits = [
+                datasets.SplitGenerator(
+                    name="validation.clean",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("dev.clean"),
+                        "files": dl_manager.iter_archive(archive_path["dev.clean"]),
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name="validation.other",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("dev.other"),
+                        "files": dl_manager.iter_archive(archive_path["dev.other"]),
+                    },
+                ),
+            ]
+            test_splits = [
+                datasets.SplitGenerator(
+                    name="test.clean",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("test.clean"),
+                        "files": dl_manager.iter_archive(archive_path["test.clean"]),
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name="test.other",
+                    gen_kwargs={
+                        "local_extracted_archive": local_extracted_archive.get("test.other"),
+                        "files": dl_manager.iter_archive(archive_path["test.other"]),
+                    },
+                ),
+            ]
+
+        return train_splits + dev_splits + test_splits
+
+    def _generate_examples(self, files, local_extracted_archive):
+        """Generate examples from a LibriSpeech archive_path."""
+        key = 0
+        audio_data = {}
+        transcripts = []
+        for path, f in files:
+            if path.endswith(".flac"):
+                id_ = path.split("/")[-1][: -len(".flac")]
+                audio_data[id_] = f.read()
+            elif path.endswith(".trans.txt"):
+                for line in f:
+                    if line:
+                        line = line.decode("utf-8").strip()
+                        id_, transcript = line.split(" ", 1)
+                        audio_file = f"{id_}.flac"
+                        speaker_id, chapter_id = [int(el) for el in id_.split("-")[:2]]
+                        audio_file = (
+                            os.path.join(local_extracted_archive, audio_file)
+                            if local_extracted_archive
+                            else audio_file
+                        )
+                        transcripts.append(
+                            {
+                                "id": id_,
+                                "speaker_id": speaker_id,
+                                "chapter_id": chapter_id,
+                                "file": audio_file,
+                                "text": transcript,
+                            }
+                        )
+            if audio_data and len(audio_data) == len(transcripts):
+                for transcript in transcripts:
+                    audio = {"path": transcript["file"], "bytes": audio_data[transcript["id"]]}
+                    yield key, {"audio": audio, **transcript}
+                    key += 1
+                audio_data = {}
+                transcripts = []
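
Reviewer note: the new `_split_generators` indexes `archive_path` by key (e.g. `archive_path["train.100"]`) because `dl_manager.download` mirrors the structure of its argument, so the nested `_DL_URLS[self.config.name]` dict comes back as a same-shaped dict of local paths. A minimal sketch of that contract (illustrative URLs and paths only, not part of the commit):

def sketch_split_download(dl_manager):
    # Illustrative only: download() on a dict of URLs returns a dict of
    # downloaded paths with the same keys, which is why key-based indexing works.
    urls = {
        "dev": "http://www.openslr.org/resources/12/dev-clean.tar.gz",
        "test": "http://www.openslr.org/resources/12/test-clean.tar.gz",
    }
    paths = dl_manager.download(urls)  # e.g. {"dev": "/cache/.../dev-clean.tar.gz", ...}
    # iter_archive streams (path_inside_archive, file_object) pairs without
    # extracting the tarball, which is what makes streaming mode possible.
    return dl_manager.iter_archive(paths["dev"])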
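The buffering in the new `_generate_examples` relies on LibriSpeech's archive layout: each chapter directory holds the `.flac` files plus one `*.trans.txt`, so once a chapter's transcript file has been read, `len(audio_data) == len(transcripts)` and the buffered examples can be flushed. A self-contained sketch of that flush logic on a fake in-memory archive (file names and bytes are made up):

import io

# Fake (path, file_object) stream in the order iter_archive would yield it:
# two flac files for chapter 1272-128104, then that chapter's transcript file.
fake_archive = [
    ("1272/128104/1272-128104-0000.flac", io.BytesIO(b"\x00flac-bytes-0")),
    ("1272/128104/1272-128104-0001.flac", io.BytesIO(b"\x00flac-bytes-1")),
    ("1272/128104/1272-128104.trans.txt",
     io.BytesIO(b"1272-128104-0000 A SENTENCE\n1272-128104-0001 ANOTHER ONE\n")),
]

audio_data, transcripts, key = {}, [], 0
for path, f in fake_archive:
    if path.endswith(".flac"):
        id_ = path.split("/")[-1][: -len(".flac")]
        audio_data[id_] = f.read()
    elif path.endswith(".trans.txt"):
        for line in f:
            id_, text = line.decode("utf-8").strip().split(" ", 1)
            transcripts.append({"id": id_, "text": text})
    # Flush a whole chapter once every buffered flac has a matching transcript.
    if audio_data and len(audio_data) == len(transcripts):
        for t in transcripts:
            print(key, t["id"], t["text"], len(audio_data[t["id"]]), "bytes")
            key += 1
        audio_data, transcripts = {}, []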
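For completeness, a quick way to smoke-test the updated script. This is a hedged sketch, not part of the commit: it assumes a `datasets` version that still loads script-based datasets from a local path, and `soundfile` installed for flac decoding.

import datasets

# Hypothetical local path to the script from this commit; the config name is
# one of "clean", "other", "all" as defined in BUILDER_CONFIGS.
ds = datasets.load_dataset("./test-user.py", "clean", split="test", streaming=True)

sample = next(iter(ds))
print(sample["id"], sample["speaker_id"], sample["chapter_id"])
print(sample["text"])
print(sample["audio"]["sampling_rate"])  # 16000, per the Audio feature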