speech-test committed on
Commit 715e154
1 Parent(s): d9baac1

Upload data

data/IEMOCAP_full_release.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99da5de585066b100f2a16b8960a350a6620fa2487f1127e969198f7d7f9bcba
+ size 1209515
data/LibriSpeech-test-clean.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a59633668a54ac2fbd99283afa291be3c3db130a1cf36687e18d8876db9f2df1
+ size 626257
data/VoxCeleb1.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:539e1de8d0158ab7cb7fbb7dd793bab99bada17319038e31bccfbed16c9b2219
+ size 1512582
data/fluent_speech_commands_dataset.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d0ef6e970baffb1b6ca6f370c07240bdcd0dd32b1436426fa025019c00d9894
+ size 494518
data/speech_commands_test_set_v0.01.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:026ed5d467c50a07dec3ada87a9d833ba9d21cd7367721d3dcb08a28482d4c06
+ size 211385
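
The five `data/*.zip` entries above are Git LFS pointer files (spec version, SHA-256 object id, byte size) rather than the archives themselves; the actual zip contents live in LFS storage. As a minimal sketch of how one of the archives could be fetched programmatically, assuming this commit belongs to a dataset repository on the Hugging Face Hub (the `repo_id` below is a placeholder, not taken from this commit):

```python
# Sketch only: resolve one LFS-tracked archive through huggingface_hub.
# "your-namespace/superb-demo" is a hypothetical repo_id; substitute the
# actual dataset repository that this commit belongs to.
from huggingface_hub import hf_hub_download

zip_path = hf_hub_download(
    repo_id="your-namespace/superb-demo",  # placeholder
    filename="data/LibriSpeech-test-clean.zip",
    repo_type="dataset",
)
print(zip_path)  # local cache path of the resolved archive
```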
superb_demo.py ADDED
@@ -0,0 +1,423 @@
+ # coding=utf-8
+ # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """SUPERB: Speech processing Universal PERformance Benchmark."""
+
+ import csv
+ import glob
+ import os
+ import textwrap
+
+ import datasets
+ from datasets.tasks import AutomaticSpeechRecognition
+
+ _CITATION = """\
+ @article{DBLP:journals/corr/abs-2105-01051,
+   author    = {Shu{-}Wen Yang and
+                Po{-}Han Chi and
+                Yung{-}Sung Chuang and
+                Cheng{-}I Jeff Lai and
+                Kushal Lakhotia and
+                Yist Y. Lin and
+                Andy T. Liu and
+                Jiatong Shi and
+                Xuankai Chang and
+                Guan{-}Ting Lin and
+                Tzu{-}Hsien Huang and
+                Wei{-}Cheng Tseng and
+                Ko{-}tik Lee and
+                Da{-}Rong Liu and
+                Zili Huang and
+                Shuyan Dong and
+                Shang{-}Wen Li and
+                Shinji Watanabe and
+                Abdelrahman Mohamed and
+                Hung{-}yi Lee},
+   title     = {{SUPERB:} Speech processing Universal PERformance Benchmark},
+   journal   = {CoRR},
+   volume    = {abs/2105.01051},
+   year      = {2021},
+   url       = {https://arxiv.org/abs/2105.01051},
+   archivePrefix = {arXiv},
+   eprint    = {2105.01051},
+   timestamp = {Thu, 01 Jul 2021 13:30:22 +0200},
+   biburl    = {https://dblp.org/rec/journals/corr/abs-2105-01051.bib},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Self-supervised learning (SSL) has proven vital for advancing research in
+ natural language processing (NLP) and computer vision (CV). The paradigm
+ pretrains a shared model on large volumes of unlabeled data and achieves
+ state-of-the-art (SOTA) for various tasks with minimal adaptation. However, the
+ speech processing community lacks a similar setup to systematically explore the
+ paradigm. To bridge this gap, we introduce Speech processing Universal
+ PERformance Benchmark (SUPERB). SUPERB is a leaderboard to benchmark the
+ performance of a shared model across a wide range of speech processing tasks
+ with minimal architecture changes and labeled data. Among multiple usages of the
+ shared model, we especially focus on extracting the representation learned from
+ SSL due to its preferable re-usability. We present a simple framework to solve
+ SUPERB tasks by learning task-specialized lightweight prediction heads on top of
+ the frozen shared model. Our results demonstrate that the framework is promising
+ as SSL representations show competitive generalizability and accessibility
+ across SUPERB tasks. We release SUPERB as a challenge with a leaderboard and a
+ benchmark toolkit to fuel the research in representation learning and general
+ speech processing.
+
+ Note that in order to limit the required storage for preparing this dataset, the
+ audio is stored in the .flac format and is not converted to a float32 array. To
+ convert the audio file to a float32 array, please make use of the `.map()`
+ function as follows:
+
+
+ ```python
+ import soundfile as sf
+
+ def map_to_array(batch):
+     speech_array, _ = sf.read(batch["file"])
+     batch["speech"] = speech_array
+     return batch
+
+ dataset = dataset.map(map_to_array, remove_columns=["file"])
+ ```
+ """
+
+
+ class SuperbConfig(datasets.BuilderConfig):
+     """BuilderConfig for Superb."""
+
+     def __init__(
+         self,
+         features,
+         url,
+         data_url=None,
+         supervised_keys=None,
+         task_templates=None,
+         **kwargs,
+     ):
+         super().__init__(version=datasets.Version("1.9.0", ""), **kwargs)
+         self.features = features
+         self.data_url = data_url
+         self.url = url
+         self.supervised_keys = supervised_keys
+         self.task_templates = task_templates
+
+
+ class Superb(datasets.GeneratorBasedBuilder):
+     """Superb dataset."""
+
+     BUILDER_CONFIGS = [
+         SuperbConfig(
+             name="asr",
+             description=textwrap.dedent(
+                 """\
+                 ASR transcribes utterances into words. While PR analyzes the
+                 improvement in modeling phonetics, ASR reflects the significance of
+                 the improvement in a real-world scenario. LibriSpeech
+                 train-clean-100/dev-clean/test-clean subsets are used for
+                 training/validation/testing. The evaluation metric is word error
+                 rate (WER)."""
+             ),
+             features=datasets.Features(
+                 {
+                     "file": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "speaker_id": datasets.Value("int64"),
+                     "chapter_id": datasets.Value("int64"),
+                     "id": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=("file", "text"),
+             url="http://www.openslr.org/12",
+             data_url="data/LibriSpeech-test-clean.zip",
+             task_templates=[AutomaticSpeechRecognition(audio_file_path_column="file", transcription_column="text")],
+         ),
+         SuperbConfig(
+             name="ks",
+             description=textwrap.dedent(
+                 """\
+                 Keyword Spotting (KS) detects preregistered keywords by classifying utterances into a predefined set of
+                 words. The task is usually performed on-device for a fast response time. Thus, accuracy, model size, and
+                 inference time are all crucial. SUPERB uses the widely used Speech Commands dataset v1.0 for the task.
+                 The dataset consists of ten classes of keywords, a class for silence, and an unknown class to include
+                 false positives. The evaluation metric is accuracy (ACC)."""
+             ),
+             features=datasets.Features(
+                 {
+                     "file": datasets.Value("string"),
+                     "label": datasets.ClassLabel(
+                         names=[
+                             "yes",
+                             "no",
+                             "up",
+                             "down",
+                             "left",
+                             "right",
+                             "on",
+                             "off",
+                             "stop",
+                             "go",
+                             "_silence_",
+                             "_unknown_",
+                         ]
+                     ),
+                 }
+             ),
+             supervised_keys=("file", "label"),
+             url="https://www.tensorflow.org/datasets/catalog/speech_commands",
+             data_url="data/speech_commands_test_set_v0.01.zip",
+         ),
+         SuperbConfig(
+             name="ic",
+             description=textwrap.dedent(
+                 """\
+                 Intent Classification (IC) classifies utterances into predefined classes to determine the intent of
+                 speakers. SUPERB uses the Fluent Speech Commands dataset, where each utterance is tagged with three intent
+                 labels: action, object, and location. The evaluation metric is accuracy (ACC)."""
+             ),
+             features=datasets.Features(
+                 {
+                     "file": datasets.Value("string"),
+                     "speaker_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "action": datasets.ClassLabel(
+                         names=["activate", "bring", "change language", "deactivate", "decrease", "increase"]
+                     ),
+                     "object": datasets.ClassLabel(
+                         names=[
+                             "Chinese",
+                             "English",
+                             "German",
+                             "Korean",
+                             "heat",
+                             "juice",
+                             "lamp",
+                             "lights",
+                             "music",
+                             "newspaper",
+                             "none",
+                             "shoes",
+                             "socks",
+                             "volume",
+                         ]
+                     ),
+                     "location": datasets.ClassLabel(names=["bedroom", "kitchen", "none", "washroom"]),
+                 }
+             ),
+             # no default supervised keys, since there are 3 labels
+             supervised_keys=None,
+             url="https://fluent.ai/fluent-speech-commands-a-dataset-for-spoken-language-understanding-research/",
+             data_url="data/fluent_speech_commands_dataset.zip",
+         ),
+         SuperbConfig(
+             name="si",
+             description=textwrap.dedent(
+                 """\
+                 Speaker Identification (SI) classifies each utterance for its speaker identity as a multi-class
+                 classification, where speakers are in the same predefined set for both training and testing. The widely
+                 used VoxCeleb1 dataset is adopted, and the evaluation metric is accuracy (ACC)."""
+             ),
+             features=datasets.Features(
+                 {
+                     "file": datasets.Value("string"),
+                     "label": datasets.ClassLabel(names=[f"id{i + 10001}" for i in range(1251)]),
+                 }
+             ),
+             supervised_keys=("file", "label"),
+             url="https://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1.html",
+             data_url="data/VoxCeleb1.zip",
+         ),
+         SuperbConfig(
+             name="er",
+             description=textwrap.dedent(
+                 """\
+                 Emotion Recognition (ER) predicts an emotion class for each utterance. The most widely used ER dataset,
+                 IEMOCAP, is adopted, and we follow the conventional evaluation protocol: we drop the unbalanced emotion
+                 classes to leave the final four classes with a similar amount of data points and cross-validate on five
+                 folds of the standard splits. The evaluation metric is accuracy (ACC)."""
+             ),
+             features=datasets.Features(
+                 {
+                     "file": datasets.Value("string"),
+                     "label": datasets.ClassLabel(names=["neu", "hap", "ang", "sad"]),
+                 }
+             ),
+             supervised_keys=("file", "label"),
+             url="https://sail.usc.edu/iemocap/",
+             data_url="data/IEMOCAP_full_release.zip",
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=self.config.features,
+             supervised_keys=self.config.supervised_keys,
+             homepage=self.config.url,
+             citation=_CITATION,
+             task_templates=self.config.task_templates,
+         )
+
+     def _split_generators(self, dl_manager):
+         if self.config.name == "asr":
+             archive_path = dl_manager.download_and_extract(self.config.data_url)
+             return [
+                 datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path}),
+             ]
+         elif self.config.name == "ks":
+             archive_path = dl_manager.download_and_extract(self.config.data_url)
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": "test"}
+                 ),
+             ]
+         elif self.config.name == "ic":
+             archive_path = dl_manager.download_and_extract(self.config.data_url)
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": "test"}
+                 ),
+             ]
+         elif self.config.name == "si":
+             archive_path = dl_manager.download_and_extract(self.config.data_url)
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": 3}
+                 ),
+             ]
+         elif self.config.name == "sd":
+             archive_path = dl_manager.download_and_extract(self.config.data_url)
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": "test"}
+                 )
+             ]
+         elif self.config.name == "er":
+             archive_path = dl_manager.download_and_extract(self.config.data_url)
+             return [
+                 datasets.SplitGenerator(
+                     name="session1", gen_kwargs={"archive_path": archive_path, "split": 1},
+                 )
+             ]
+
+     def _generate_examples(self, archive_path, split=None):
+         """Generate examples."""
+         if self.config.name == "asr":
+             transcripts_glob = os.path.join(archive_path, "LibriSpeech", "*/*/*/*.txt")
+             key = 0
+             for transcript_path in sorted(glob.glob(transcripts_glob)):
+                 transcript_dir_path = os.path.dirname(transcript_path)
+                 with open(transcript_path, "r", encoding="utf-8") as f:
+                     for line in f:
+                         line = line.strip()
+                         id_, transcript = line.split(" ", 1)
+                         audio_file = f"{id_}.flac"
+                         speaker_id, chapter_id = [int(el) for el in id_.split("-")[:2]]
+                         yield key, {
+                             "id": id_,
+                             "speaker_id": speaker_id,
+                             "chapter_id": chapter_id,
+                             "file": os.path.join(transcript_dir_path, audio_file),
+                             "text": transcript,
+                         }
+                         key += 1
+         elif self.config.name == "ks":
+             words = ["yes", "no", "up", "down", "left", "right", "on", "off", "stop", "go"]
+             splits = _split_ks_files(archive_path, split)
+             for key, audio_file in enumerate(sorted(splits[split])):
+                 base_dir, file_name = os.path.split(audio_file)
+                 _, word = os.path.split(base_dir)
+                 if word in words:
+                     label = word
+                 elif word == "_silence_" or word == "_background_noise_":
+                     label = "_silence_"
+                 else:
+                     label = "_unknown_"
+                 yield key, {"file": audio_file, "label": label}
+         elif self.config.name == "ic":
+             root_path = os.path.join(archive_path, "fluent_speech_commands_dataset/")
+             csv_path = os.path.join(root_path, f"data/{split}_data.csv")
+             with open(csv_path, encoding="utf-8") as csv_file:
+                 csv_reader = csv.reader(csv_file, delimiter=",", skipinitialspace=True)
+                 next(csv_reader)
+                 for row in csv_reader:
+                     key, file_path, speaker_id, text, action, object_, location = row
+                     yield key, {
+                         "file": os.path.join(root_path, file_path),
+                         "speaker_id": speaker_id,
+                         "text": text,
+                         "action": action,
+                         "object": object_,
+                         "location": location,
+                     }
+         elif self.config.name == "si":
+             wav_path = os.path.join(archive_path, "wav/")
+             splits_path = os.path.join(archive_path, "veri_test_class.txt")
+             with open(splits_path, "r", encoding="utf-8") as f:
+                 for key, line in enumerate(f):
+                     split_id, file_path = line.strip().split(" ")
+                     if int(split_id) != split:
+                         continue
+                     speaker_id = file_path.split("/")[0]
+                     yield key, {
+                         "file": os.path.join(wav_path, file_path),
+                         "label": speaker_id,
+                     }
+         elif self.config.name == "er":
+             root_path = os.path.join(archive_path, f"Session{split}/")
+             wav_path = os.path.join(root_path, "sentences/wav/")
+             labels_path = os.path.join(root_path, "dialog/EmoEvaluation/*.txt")
+             emotions = ["neu", "hap", "ang", "sad", "exc"]
+             key = 0
+             for labels_file in sorted(glob.glob(labels_path)):
+                 with open(labels_file, "r", encoding="utf-8") as f:
+                     for line in f:
+                         if line[0] != "[":
+                             continue
+                         _, filename, emo, _ = line.split("\t")
+                         if emo not in emotions:
+                             continue
+                         wav_subdir = filename.rsplit("_", 1)[0]
+                         filename = f"{filename}.wav"
+                         yield key, {
+                             "file": os.path.join(wav_path, wav_subdir, filename),
+                             "label": emo.replace("exc", "hap"),
+                         }
+                         key += 1
+
+
+ def _split_ks_files(archive_path, split):
+     audio_path = os.path.join(archive_path, "**/*.wav")
+     audio_paths = glob.glob(audio_path)
+     if split == "test":
+         # use all available files for the test archive
+         return {"test": audio_paths}
+
+     val_list_file = os.path.join(archive_path, "validation_list.txt")
+     test_list_file = os.path.join(archive_path, "testing_list.txt")
+     with open(val_list_file, encoding="utf-8") as f:
+         val_paths = f.read().strip().splitlines()
+         val_paths = [os.path.join(archive_path, p) for p in val_paths]
+     with open(test_list_file, encoding="utf-8") as f:
+         test_paths = f.read().strip().splitlines()
+         test_paths = [os.path.join(archive_path, p) for p in test_paths]
+
+     # the train set paths are simply those that appear in neither the
+     # validation nor the testing split lists
+     train_paths = list(set(audio_paths) - set(val_paths) - set(test_paths))
+
+     return {"train": train_paths, "val": val_paths}
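
Taken together, the archives under `data/` and `superb_demo.py` form a standard `datasets` loading script. A usage sketch, assuming the script is loaded by its local path with a `datasets` release that still supports script-based datasets and that `soundfile` is installed:

```python
# Sketch only: load the "er" configuration defined above and decode its audio.
import soundfile as sf
from datasets import load_dataset

# The "er" config exposes a single split named "session1" (see _split_generators).
dataset = load_dataset("./superb_demo.py", "er", split="session1")

def map_to_array(batch):
    # Decode the audio file into a float32 array, mirroring the _DESCRIPTION example.
    speech_array, _ = sf.read(batch["file"])
    batch["speech"] = speech_array
    return batch

dataset = dataset.map(map_to_array)
print(dataset[0]["label"], len(dataset[0]["speech"]))
```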