speech-test committed
Commit: 350d601
Parent: 0b620a8

Upload KS, IC, SI, ER

Files changed (7)
  1. .gitattributes +1 -0
  2. .gitignore +3 -0
  3. er.json +3 -0
  4. ic.json +3 -0
  5. ks.json +3 -0
  6. si.json +3 -0
  7. superb_dummy.py +249 -0
.gitattributes CHANGED
@@ -14,3 +14,4 @@
  *.pb filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
+ *.json filter=lfs diff=lfs merge=lfs -text
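The added pattern puts every *.json file in the repository under Git LFS; it is the line that git lfs track "*.json" appends to .gitattributes. This is why the four JSON payloads in this commit appear below as short LFS pointer files rather than as raw data.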
.gitignore ADDED
@@ -0,0 +1,3 @@
+ .idea
+ .vscode
+ *.lock
er.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:198b0e002b23e243db2081dd531d0534a9c9abb804d0c5a452f1b9b208df11f5
+ size 6110558
ic.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f18d8b28e1dd18f272e18678b79c61ca4e07f54eba7cbce0926e1e8f360f297e
+ size 3451838
ks.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b73f8945241f056c5f0b58895de2da672716c512551150428d3db0a334bbdc53
+ size 2754033
si.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:864caab59d624605513916e31dc1db23695355be773b6548598f9cad77a36126
+ size 12841264
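Each of the four JSON files above is stored as a Git LFS pointer rather than as data: a version line naming the LFS spec, the SHA-256 oid of the real payload, and its size in bytes. As a minimal sketch (the helper names here are hypothetical, not part of this repo), such a pointer can be parsed and a downloaded payload verified against it like so:

import hashlib

def parse_lfs_pointer(path):
    """Read a Git LFS pointer file into a dict of its key/value fields."""
    with open(path, "r", encoding="utf-8") as f:
        return dict(line.strip().split(" ", 1) for line in f if line.strip())

def verify_payload(pointer, payload_path):
    """Check a downloaded payload against the pointer's oid and size."""
    with open(payload_path, "rb") as f:
        data = f.read()
    digest = "sha256:" + hashlib.sha256(data).hexdigest()
    return digest == pointer["oid"] and len(data) == int(pointer["size"])

# parse_lfs_pointer("si.json") on the pointer above would yield:
# {"version": "https://git-lfs.github.com/spec/v1",
#  "oid": "sha256:864caab59d624605513916e31dc1db23695355be773b6548598f9cad77a36126",
#  "size": "12841264"}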
superb_dummy.py ADDED
@@ -0,0 +1,249 @@
+ # coding=utf-8
+ # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """SUPERB: Speech processing Universal PERformance Benchmark."""
+
+
+ import base64
+ import json
+ import textwrap
+
+ import datasets
+ import numpy as np
+
+ _CITATION = """\
+ @article{DBLP:journals/corr/abs-2105-01051,
+   author    = {Shu{-}Wen Yang and
+                Po{-}Han Chi and
+                Yung{-}Sung Chuang and
+                Cheng{-}I Jeff Lai and
+                Kushal Lakhotia and
+                Yist Y. Lin and
+                Andy T. Liu and
+                Jiatong Shi and
+                Xuankai Chang and
+                Guan{-}Ting Lin and
+                Tzu{-}Hsien Huang and
+                Wei{-}Cheng Tseng and
+                Ko{-}tik Lee and
+                Da{-}Rong Liu and
+                Zili Huang and
+                Shuyan Dong and
+                Shang{-}Wen Li and
+                Shinji Watanabe and
+                Abdelrahman Mohamed and
+                Hung{-}yi Lee},
+   title     = {{SUPERB:} Speech processing Universal PERformance Benchmark},
+   journal   = {CoRR},
+   volume    = {abs/2105.01051},
+   year      = {2021},
+   url       = {https://arxiv.org/abs/2105.01051},
+   archivePrefix = {arXiv},
+   eprint    = {2105.01051},
+   timestamp = {Thu, 01 Jul 2021 13:30:22 +0200},
+   biburl    = {https://dblp.org/rec/journals/corr/abs-2105-01051.bib},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Self-supervised learning (SSL) has proven vital for advancing research in
+ natural language processing (NLP) and computer vision (CV). The paradigm
+ pretrains a shared model on large volumes of unlabeled data and achieves
+ state-of-the-art (SOTA) for various tasks with minimal adaptation. However, the
+ speech processing community lacks a similar setup to systematically explore the
+ paradigm. To bridge this gap, we introduce Speech processing Universal
+ PERformance Benchmark (SUPERB). SUPERB is a leaderboard to benchmark the
+ performance of a shared model across a wide range of speech processing tasks
+ with minimal architecture changes and labeled data. Among multiple usages of the
+ shared model, we especially focus on extracting the representation learned from
+ SSL due to its preferable re-usability. We present a simple framework to solve
+ SUPERB tasks by learning task-specialized lightweight prediction heads on top of
+ the frozen shared model. Our results demonstrate that the framework is promising
+ as SSL representations show competitive generalizability and accessibility
+ across SUPERB tasks. We release SUPERB as a challenge with a leaderboard and a
+ benchmark toolkit to fuel the research in representation learning and general
+ speech processing.
+ """
+
+
+ class SuperbConfig(datasets.BuilderConfig):
+     """BuilderConfig for Superb."""
+
+     def __init__(
+         self,
+         features,
+         url,
+         data_url=None,
+         supervised_keys=None,
+         task_templates=None,
+         **kwargs,
+     ):
+         super().__init__(version=datasets.Version("1.9.0", ""), **kwargs)
+         self.features = features
+         self.data_url = data_url
+         self.url = url
+         self.supervised_keys = supervised_keys
+         self.task_templates = task_templates
+
+
+ class Superb(datasets.GeneratorBasedBuilder):
+     """Superb dataset."""
+
+     BUILDER_CONFIGS = [
+         SuperbConfig(
+             name="ks",
+             description=textwrap.dedent(
+                 """\
+                 Keyword Spotting (KS) detects preregistered keywords by classifying utterances into a predefined set of
+                 words. The task is usually performed on-device for fast response time. Thus, accuracy, model size, and
+                 inference time are all crucial. SUPERB uses the widely used Speech Commands dataset v1.0 for the task.
+                 The dataset consists of ten classes of keywords, a class for silence, and an unknown class to include
+                 false positives. The evaluation metric is accuracy (ACC)."""
+             ),
+             features=datasets.Features(
+                 {
+                     "file": datasets.Value("string"),
+                     "label": datasets.ClassLabel(
+                         names=[
+                             "yes",
+                             "no",
+                             "up",
+                             "down",
+                             "left",
+                             "right",
+                             "on",
+                             "off",
+                             "stop",
+                             "go",
+                             "_silence_",
+                             "_unknown_",
+                         ]
+                     ),
+                     "speech": datasets.Sequence(datasets.Value("float32")),
+                 }
+             ),
+             url="https://www.tensorflow.org/datasets/catalog/speech_commands",
+             data_url="ks.json",
+         ),
+         SuperbConfig(
+             name="ic",
+             description=textwrap.dedent(
+                 """\
+                 Intent Classification (IC) classifies utterances into predefined classes to determine the intent of
+                 speakers. SUPERB uses the Fluent Speech Commands dataset, where each utterance is tagged with three intent
+                 labels: action, object, and location. The evaluation metric is accuracy (ACC)."""
+             ),
+             features=datasets.Features(
+                 {
+                     "file": datasets.Value("string"),
+                     "speaker_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "action": datasets.ClassLabel(
+                         names=["activate", "bring", "change language", "deactivate", "decrease", "increase"]
+                     ),
+                     "object": datasets.ClassLabel(
+                         names=[
+                             "Chinese",
+                             "English",
+                             "German",
+                             "Korean",
+                             "heat",
+                             "juice",
+                             "lamp",
+                             "lights",
+                             "music",
+                             "newspaper",
+                             "none",
+                             "shoes",
+                             "socks",
+                             "volume",
+                         ]
+                     ),
+                     "location": datasets.ClassLabel(names=["bedroom", "kitchen", "none", "washroom"]),
+                     "speech": datasets.Sequence(datasets.Value("float32")),
+                 }
+             ),
+             url="https://fluent.ai/fluent-speech-commands-a-dataset-for-spoken-language-understanding-research/",
+             data_url="ic.json",
+         ),
+         SuperbConfig(
+             name="si",
+             description=textwrap.dedent(
+                 """\
+                 Speaker Identification (SI) classifies each utterance for its speaker identity as a multi-class
+                 classification, where speakers are in the same predefined set for both training and testing. The widely
+                 used VoxCeleb1 dataset is adopted, and the evaluation metric is accuracy (ACC)."""
+             ),
+             features=datasets.Features(
+                 {
+                     "file": datasets.Value("string"),
+                     "label": datasets.ClassLabel(names=[f"id{i+10001}" for i in range(1251)]),
+                     "speech": datasets.Sequence(datasets.Value("float32")),
+                 }
+             ),
+             url="https://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1.html",
+             data_url="si.json",
+         ),
+         SuperbConfig(
+             name="er",
+             description=textwrap.dedent(
+                 """\
+                 Emotion Recognition (ER) predicts an emotion class for each utterance. The most widely used ER dataset,
+                 IEMOCAP, is adopted, and we follow the conventional evaluation protocol: we drop the unbalanced emotion
+                 classes to leave the final four classes with a similar number of data points and cross-validate on five
+                 folds of the standard splits. The evaluation metric is accuracy (ACC)."""
+             ),
+             features=datasets.Features(
+                 {
+                     "file": datasets.Value("string"),
+                     "label": datasets.ClassLabel(names=["neu", "hap", "ang", "sad"]),
+                     "speech": datasets.Sequence(datasets.Value("float32")),
+                 }
+             ),
+             url="https://sail.usc.edu/iemocap/",
+             data_url="er.json",
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=self.config.features,
+             supervised_keys=self.config.supervised_keys,
+             homepage=self.config.url,
+             citation=_CITATION,
+             task_templates=self.config.task_templates,
+         )
+
+     def _split_generators(self, dl_manager):
+         data_path = dl_manager.download_and_extract(self.config.data_url)
+
+         # Each task ships a single JSON-lines file, exposed as a validation split.
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"data_path": data_path},
+             )
+         ]
+
+     def _generate_examples(self, data_path):
+         """Generate examples."""
+         with open(data_path, "r", encoding="utf-8") as f:
+             for key, line in enumerate(f):
+                 example = json.loads(line)
+                 # The waveform is stored as a base64-encoded buffer of float32
+                 # samples; decode it back into a numpy array.
+                 example["speech"] = np.frombuffer(base64.b64decode(example["speech"]), dtype=np.float32)
+
+                 yield key, example
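For reference, this is the JSON-lines layout the script consumes: one object per line, with the waveform serialized as base64-encoded float32 bytes. The sketch below round-trips one record through that encoding; the field values are illustrative, not taken from the actual dummy files.

import base64
import json

import numpy as np

# Build one record the way _generate_examples expects to read it
# (illustrative values; the real ks.json/ic.json/si.json/er.json
# were produced elsewhere).
speech = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz
record = {
    "file": "dummy.wav",
    "label": "_silence_",
    "speech": base64.b64encode(speech.tobytes()).decode("ascii"),
}
line = json.dumps(record)

# _generate_examples reverses the encoding:
decoded = json.loads(line)
waveform = np.frombuffer(base64.b64decode(decoded["speech"]), dtype=np.float32)
assert np.array_equal(waveform, speech)

Once the LFS payloads are materialized locally (e.g. with git lfs pull), the script should be loadable along the lines of datasets.load_dataset("superb_dummy.py", "ks", split="validation"), though the exact invocation depends on the datasets version in use.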