albertvillanova (HF staff) committed on
Commit 4902179
1 Parent(s): e1e7365

Delete loading script

Files changed (1)
  1. superb_demo.py +0 -435
superb_demo.py DELETED
@@ -1,435 +0,0 @@
- # coding=utf-8
- # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """SUPERB: Speech processing Universal PERformance Benchmark."""
-
- import csv
- import glob
- import os
- import textwrap
-
- import datasets
- from datasets.tasks import AutomaticSpeechRecognition
-
- _CITATION = """\
- @article{DBLP:journals/corr/abs-2105-01051,
-   author    = {Shu{-}Wen Yang and
-                Po{-}Han Chi and
-                Yung{-}Sung Chuang and
-                Cheng{-}I Jeff Lai and
-                Kushal Lakhotia and
-                Yist Y. Lin and
-                Andy T. Liu and
-                Jiatong Shi and
-                Xuankai Chang and
-                Guan{-}Ting Lin and
-                Tzu{-}Hsien Huang and
-                Wei{-}Cheng Tseng and
-                Ko{-}tik Lee and
-                Da{-}Rong Liu and
-                Zili Huang and
-                Shuyan Dong and
-                Shang{-}Wen Li and
-                Shinji Watanabe and
-                Abdelrahman Mohamed and
-                Hung{-}yi Lee},
-   title     = {{SUPERB:} Speech processing Universal PERformance Benchmark},
-   journal   = {CoRR},
-   volume    = {abs/2105.01051},
-   year      = {2021},
-   url       = {https://arxiv.org/abs/2105.01051},
-   archivePrefix = {arXiv},
-   eprint    = {2105.01051},
-   timestamp = {Thu, 01 Jul 2021 13:30:22 +0200},
-   biburl    = {https://dblp.org/rec/journals/corr/abs-2105-01051.bib},
-   bibsource = {dblp computer science bibliography, https://dblp.org}
- }
- """
-
- _DESCRIPTION = """\
- Self-supervised learning (SSL) has proven vital for advancing research in
- natural language processing (NLP) and computer vision (CV). The paradigm
- pretrains a shared model on large volumes of unlabeled data and achieves
- state-of-the-art (SOTA) for various tasks with minimal adaptation. However, the
- speech processing community lacks a similar setup to systematically explore the
- paradigm. To bridge this gap, we introduce Speech processing Universal
- PERformance Benchmark (SUPERB). SUPERB is a leaderboard to benchmark the
- performance of a shared model across a wide range of speech processing tasks
- with minimal architecture changes and labeled data. Among multiple usages of the
- shared model, we especially focus on extracting the representation learned from
- SSL due to its preferable re-usability. We present a simple framework to solve
- SUPERB tasks by learning task-specialized lightweight prediction heads on top of
- the frozen shared model. Our results demonstrate that the framework is promising
- as SSL representations show competitive generalizability and accessibility
- across SUPERB tasks. We release SUPERB as a challenge with a leaderboard and a
- benchmark toolkit to fuel the research in representation learning and general
- speech processing.
-
- Note that in order to limit the required storage for preparing this dataset, the
- audio is stored in the .flac format and is not converted to a float32 array. To
- convert the audio file to a float32 array, please make use of the `.map()`
- function as follows:
-
-
- ```python
- import soundfile as sf
-
- def map_to_array(batch):
-     speech_array, _ = sf.read(batch["file"])
-     batch["speech"] = speech_array
-     return batch
-
- dataset = dataset.map(map_to_array, remove_columns=["file"])
- ```
- """
-
-
- class SuperbConfig(datasets.BuilderConfig):
-     """BuilderConfig for Superb."""
-
-     def __init__(
-         self,
-         features,
-         url,
-         data_url=None,
-         supervised_keys=None,
-         task_templates=None,
-         **kwargs,
-     ):
-         super().__init__(version=datasets.Version("1.9.0", ""), **kwargs)
-         self.features = features
-         self.data_url = data_url
-         self.url = url
-         self.supervised_keys = supervised_keys
-         self.task_templates = task_templates
-
-
- class Superb(datasets.GeneratorBasedBuilder):
-     """Superb dataset."""
-
-     BUILDER_CONFIGS = [
-         SuperbConfig(
-             name="asr",
-             description=textwrap.dedent(
-                 """\
-                 ASR transcribes utterances into words. While PR analyzes the
-                 improvement in modeling phonetics, ASR reflects the significance of
-                 the improvement in a real-world scenario. LibriSpeech
-                 train-clean-100/dev-clean/test-clean subsets are used for
-                 training/validation/testing. The evaluation metric is word error
-                 rate (WER)."""
-             ),
-             features=datasets.Features(
-                 {
-                     "file": datasets.Value("string"),
-                     "audio": datasets.features.Audio(sampling_rate=16_000),
-                     "text": datasets.Value("string"),
-                     "speaker_id": datasets.Value("int64"),
-                     "chapter_id": datasets.Value("int64"),
-                     "id": datasets.Value("string"),
-                 }
-             ),
-             supervised_keys=("file", "text"),
-             url="http://www.openslr.org/12",
-             data_url="data/LibriSpeech-test-clean.zip",
-         ),
-         SuperbConfig(
-             name="ks",
-             description=textwrap.dedent(
-                 """\
-                 Keyword Spotting (KS) detects preregistered keywords by classifying utterances into a predefined set of
-                 words. The task is usually performed on-device for a fast response time. Thus, accuracy, model size, and
-                 inference time are all crucial. SUPERB uses the widely used Speech Commands dataset v1.0 for the task.
-                 The dataset consists of ten classes of keywords, a class for silence, and an unknown class to include
-                 false positives. The evaluation metric is accuracy (ACC)."""
-             ),
-             features=datasets.Features(
-                 {
-                     "file": datasets.Value("string"),
-                     "audio": datasets.features.Audio(sampling_rate=16_000),
-                     "label": datasets.ClassLabel(
-                         names=[
-                             "yes",
-                             "no",
-                             "up",
-                             "down",
-                             "left",
-                             "right",
-                             "on",
-                             "off",
-                             "stop",
-                             "go",
-                             "_silence_",
-                             "_unknown_",
-                         ]
-                     ),
-                 }
-             ),
-             supervised_keys=("file", "label"),
-             url="https://www.tensorflow.org/datasets/catalog/speech_commands",
-             data_url="data/speech_commands_test_set_v0.01.zip",
-         ),
-         SuperbConfig(
-             name="ic",
-             description=textwrap.dedent(
-                 """\
-                 Intent Classification (IC) classifies utterances into predefined classes to determine the intent of
-                 speakers. SUPERB uses the Fluent Speech Commands dataset, where each utterance is tagged with three intent
-                 labels: action, object, and location. The evaluation metric is accuracy (ACC)."""
-             ),
-             features=datasets.Features(
-                 {
-                     "file": datasets.Value("string"),
-                     "audio": datasets.features.Audio(sampling_rate=16_000),
-                     "speaker_id": datasets.Value("string"),
-                     "text": datasets.Value("string"),
-                     "action": datasets.ClassLabel(
-                         names=["activate", "bring", "change language", "deactivate", "decrease", "increase"]
-                     ),
-                     "object": datasets.ClassLabel(
-                         names=[
-                             "Chinese",
-                             "English",
-                             "German",
-                             "Korean",
-                             "heat",
-                             "juice",
-                             "lamp",
-                             "lights",
-                             "music",
-                             "newspaper",
-                             "none",
-                             "shoes",
-                             "socks",
-                             "volume",
-                         ]
-                     ),
-                     "location": datasets.ClassLabel(names=["bedroom", "kitchen", "none", "washroom"]),
-                 }
-             ),
-             # no default supervised keys, since there are 3 labels
-             supervised_keys=None,
-             url="https://fluent.ai/fluent-speech-commands-a-dataset-for-spoken-language-understanding-research/",
-             data_url="data/fluent_speech_commands_dataset.zip",
-         ),
-         SuperbConfig(
-             name="si",
-             description=textwrap.dedent(
-                 """\
-                 Speaker Identification (SI) classifies each utterance for its speaker identity as a multi-class
-                 classification, where speakers are in the same predefined set for both training and testing. The widely
-                 used VoxCeleb1 dataset is adopted, and the evaluation metric is accuracy (ACC)."""
-             ),
-             features=datasets.Features(
-                 {
-                     "file": datasets.Value("string"),
-                     "audio": datasets.features.Audio(sampling_rate=16_000),
-                     "label": datasets.ClassLabel(names=[f"id{i + 10001}" for i in range(1251)]),
-                 }
-             ),
-             supervised_keys=("file", "label"),
-             url="https://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1.html",
-             data_url="data/VoxCeleb1.zip",
-         ),
-         SuperbConfig(
-             name="er",
-             description=textwrap.dedent(
-                 """\
-                 Emotion Recognition (ER) predicts an emotion class for each utterance. The most widely used ER dataset,
-                 IEMOCAP, is adopted, and we follow the conventional evaluation protocol: we drop the unbalanced emotion
-                 classes, leaving the final four classes with a similar amount of data points, and cross-validate on five
-                 folds of the standard splits. The evaluation metric is accuracy (ACC)."""
-             ),
-             features=datasets.Features(
-                 {
-                     "file": datasets.Value("string"),
-                     "audio": datasets.features.Audio(sampling_rate=16_000),
-                     "label": datasets.ClassLabel(names=['neu', 'hap', 'ang', 'sad']),
-                 }
-             ),
-             supervised_keys=("file", "label"),
-             url="https://sail.usc.edu/iemocap/",
-             data_url="data/IEMOCAP_full_release.zip",
-         ),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=self.config.features,
-             supervised_keys=self.config.supervised_keys,
-             homepage=self.config.url,
-             citation=_CITATION,
-             task_templates=self.config.task_templates,
-         )
-
-     def _split_generators(self, dl_manager):
-         if self.config.name == "asr":
-             archive_path = dl_manager.download_and_extract(self.config.data_url)
-             return [
-                 datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path}),
-             ]
-         elif self.config.name == "ks":
-             archive_path = dl_manager.download_and_extract(self.config.data_url)
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": "test"}
-                 ),
-             ]
-         elif self.config.name == "ic":
-             archive_path = dl_manager.download_and_extract(self.config.data_url)
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": "test"}
-                 ),
-             ]
-         elif self.config.name == "si":
-             archive_path = dl_manager.download_and_extract(self.config.data_url)
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": 3}
-                 ),
-             ]
-         elif self.config.name == "sd":
-             archive_path = dl_manager.download_and_extract(self.config.data_url)
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": "test"}
-                 )
-             ]
-         elif self.config.name == "er":
-             archive_path = dl_manager.download_and_extract(self.config.data_url)
-             return [
-                 datasets.SplitGenerator(
-                     name="session1", gen_kwargs={"archive_path": archive_path, "split": 1},
-                 )
-             ]
-
-     def _generate_examples(self, archive_path, split=None):
-         """Generate examples."""
-         if self.config.name == "asr":
-             transcripts_glob = os.path.join(archive_path, "LibriSpeech", "*/*/*/*.txt")
-             key = 0
-             for transcript_path in sorted(glob.glob(transcripts_glob)):
-                 transcript_dir_path = os.path.dirname(transcript_path)
-                 with open(transcript_path, "r", encoding="utf-8") as f:
-                     for line in f:
-                         line = line.strip()
-                         id_, transcript = line.split(" ", 1)
-                         audio_file = f"{id_}.flac"
-                         speaker_id, chapter_id = [int(el) for el in id_.split("-")[:2]]
-                         audio_path = os.path.join(transcript_dir_path, audio_file)
-                         yield key, {
-                             "id": id_,
-                             "speaker_id": speaker_id,
-                             "chapter_id": chapter_id,
-                             "file": audio_path,
-                             "audio": audio_path,
-                             "text": transcript,
-                         }
-                         key += 1
-         elif self.config.name == "ks":
-             words = ["yes", "no", "up", "down", "left", "right", "on", "off", "stop", "go"]
-             splits = _split_ks_files(archive_path, split)
-             for key, audio_file in enumerate(sorted(splits[split])):
-                 base_dir, file_name = os.path.split(audio_file)
-                 _, word = os.path.split(base_dir)
-                 if word in words:
-                     label = word
-                 elif word == "_silence_" or word == "_background_noise_":
-                     label = "_silence_"
-                 else:
-                     label = "_unknown_"
-                 yield key, {"file": audio_file, "audio": audio_file, "label": label}
-         elif self.config.name == "ic":
-             root_path = os.path.join(archive_path, "fluent_speech_commands_dataset/")
-             csv_path = os.path.join(root_path, f"data/{split}_data.csv")
-             with open(csv_path, encoding="utf-8") as csv_file:
-                 csv_reader = csv.reader(csv_file, delimiter=",", skipinitialspace=True)
-                 next(csv_reader)
-                 for row in csv_reader:
-                     key, file_path, speaker_id, text, action, object_, location = row
-                     audio_path = os.path.join(root_path, file_path)
-                     yield key, {
-                         "file": audio_path,
-                         "audio": audio_path,
-                         "speaker_id": speaker_id,
-                         "text": text,
-                         "action": action,
-                         "object": object_,
-                         "location": location,
-                     }
-         elif self.config.name == "si":
-             wav_path = os.path.join(archive_path, "wav/")
-             splits_path = os.path.join(archive_path, "veri_test_class.txt")
-             with open(splits_path, "r", encoding="utf-8") as f:
-                 for key, line in enumerate(f):
-                     split_id, file_path = line.strip().split(" ")
-                     if int(split_id) != split:
-                         continue
-                     speaker_id = file_path.split("/")[0]
-                     audio_path = os.path.join(wav_path, file_path)
-                     yield key, {
-                         "file": audio_path,
-                         "audio": audio_path,
-                         "label": speaker_id,
-                     }
-         elif self.config.name == "er":
-             root_path = os.path.join(archive_path, f"Session{split}/")
-             wav_path = os.path.join(root_path, "sentences/wav/")
-             labels_path = os.path.join(root_path, "dialog/EmoEvaluation/*.txt")
-             emotions = ['neu', 'hap', 'ang', 'sad', 'exc']
-             key = 0
-             for labels_file in sorted(glob.glob(labels_path)):
-                 with open(labels_file, "r", encoding="utf-8") as f:
-                     for line in f:
-                         if line[0] != "[":
-                             continue
-                         _, filename, emo, _ = line.split("\t")
-                         if emo not in emotions:
-                             continue
-                         wav_subdir = filename.rsplit("_", 1)[0]
-                         filename = f"{filename}.wav"
-                         audio_path = os.path.join(wav_path, wav_subdir, filename)
-                         yield key, {
-                             "file": audio_path,
-                             "audio": audio_path,
-                             "label": emo.replace("exc", "hap"),
-                         }
-                         key += 1
-
-
- def _split_ks_files(archive_path, split):
-     audio_path = os.path.join(archive_path, "**/*.wav")
-     audio_paths = glob.glob(audio_path)
-     if split == "test":
-         # use all available files for the test archive
-         return {"test": audio_paths}
-
-     val_list_file = os.path.join(archive_path, "validation_list.txt")
-     test_list_file = os.path.join(archive_path, "testing_list.txt")
-     with open(val_list_file, encoding="utf-8") as f:
-         val_paths = f.read().strip().splitlines()
-         val_paths = [os.path.join(archive_path, p) for p in val_paths]
-     with open(test_list_file, encoding="utf-8") as f:
-         test_paths = f.read().strip().splitlines()
-         test_paths = [os.path.join(archive_path, p) for p in test_paths]
-
-     # the train split contains whichever paths do not appear in
-     # either the test or validation lists
-     train_paths = list(set(audio_paths) - set(val_paths) - set(test_paths))
-
-     return {"train": train_paths, "val": val_paths}