patrickvonplaten committed
Commit 305afbb · 1 Parent(s): 5523b83

Create new file

Files changed (1): ami.py (+383, -0)
ami.py ADDED
@@ -0,0 +1,383 @@
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The AMI Meeting Corpus consists of 100 hours of meeting recordings. The recordings use a range of signals
synchronized to a common timeline. These include close-talking and far-field microphones, individual and
room-view video cameras, and output from a slide projector and an electronic whiteboard. During the meetings,
the participants also have unsynchronized pens available to them that record what is written. The meetings
were recorded in English using three different rooms with different acoustic properties, and include mostly
non-native speakers.
"""

import os

import datasets

_CITATION = """\
@inproceedings{Carletta2005TheAM,
    author    = {Jean Carletta and Simone Ashby and Sebastien Bourban and Mike Flynn and Mael Guillemot and
                 Thomas Hain and Jaroslav Kadlec and Vasilis Karaiskos and Wessel Kraaij and Melissa Kronenthal and
                 Guillaume Lathoud and Mike Lincoln and Agnes Lisowska and Iain McCowan and Wilfried Post and
                 Dennis Reidsma and Pierre Wellner},
    title     = {The {AMI} Meeting Corpus: A Pre-Announcement},
    booktitle = {Machine Learning for Multimodal Interaction, Second International Workshop, {MLMI} 2005},
    year      = {2005},
}
"""

_DESCRIPTION = """\
The AMI Meeting Corpus consists of 100 hours of meeting recordings. The recordings use a range of signals
synchronized to a common timeline. These include close-talking and far-field microphones, individual and
room-view video cameras, and output from a slide projector and an electronic whiteboard. During the meetings,
the participants also have unsynchronized pens available to them that record what is written. The meetings
were recorded in English using three different rooms with different acoustic properties, and include mostly
non-native speakers.
"""

_HOMEPAGE = "https://groups.inf.ed.ac.uk/ami/corpus/"

_LICENSE = "CC BY 4.0"

_TRAIN_SAMPLE_IDS = [
    "EN2001a",
    "EN2001b",
    "EN2001d",
    "EN2001e",
    "EN2003a",
    "EN2004a",
    "EN2005a",
    "EN2006a",
    "EN2006b",
    "EN2009b",
    "EN2009c",
    "EN2009d",
    "ES2002a",
    "ES2002b",
    "ES2002c",
    "ES2002d",
    "ES2003a",
    "ES2003b",
    "ES2003c",
    "ES2003d",
    "ES2005a",
    "ES2005b",
    "ES2005c",
    "ES2005d",
    "ES2006a",
    "ES2006b",
    "ES2006c",
    "ES2006d",
    "ES2007a",
    "ES2007b",
    "ES2007c",
    "ES2007d",
    "ES2008a",
    "ES2008b",
    "ES2008c",
    "ES2008d",
    "ES2009a",
    "ES2009b",
    "ES2009c",
    "ES2009d",
    "ES2010a",
    "ES2010b",
    "ES2010c",
    "ES2010d",
    "ES2012a",
    "ES2012b",
    "ES2012c",
    "ES2012d",
    "ES2013a",
    "ES2013b",
    "ES2013c",
    "ES2013d",
    "ES2014a",
    "ES2014b",
    "ES2014c",
    "ES2014d",
    "ES2015a",
    "ES2015b",
    "ES2015c",
    "ES2015d",
    "ES2016a",
    "ES2016b",
    "ES2016c",
    "ES2016d",
    "IB4005",
    "IN1001",
    "IN1002",
    "IN1005",
    "IN1007",
    "IN1008",
    "IN1009",
    "IN1012",
    "IN1013",
    "IN1014",
    "IN1016",
    "IS1000a",
    "IS1000b",
    "IS1000c",
    "IS1000d",
    "IS1001a",
    "IS1001b",
    "IS1001c",
    "IS1001d",
    "IS1002b",
    "IS1002c",
    "IS1002d",
    "IS1003a",
    "IS1003b",
    "IS1003c",
    "IS1003d",
    "IS1004a",
    "IS1004b",
    "IS1004c",
    "IS1004d",
    "IS1005a",
    "IS1005b",
    "IS1005c",
    "IS1006a",
    "IS1006b",
    "IS1006c",
    "IS1006d",
    "IS1007a",
    "IS1007b",
    "IS1007c",
    "IS1007d",
    "TS3005a",
    "TS3005b",
    "TS3005c",
    "TS3005d",
    "TS3006a",
    "TS3006b",
    "TS3006c",
    "TS3006d",
    "TS3007a",
    "TS3007b",
    "TS3007c",
    "TS3007d",
    "TS3008a",
    "TS3008b",
    "TS3008c",
    "TS3008d",
    "TS3009a",
    "TS3009b",
    "TS3009c",
    "TS3009d",
    "TS3010a",
    "TS3010b",
    "TS3010c",
    "TS3010d",
    "TS3011a",
    "TS3011b",
    "TS3011c",
    "TS3011d",
    "TS3012a",
    "TS3012b",
    "TS3012c",
    "TS3012d",
]

_VALIDATION_SAMPLE_IDS = [
    "ES2011a",
    "ES2011c",
    "IB4001",
    "IB4003",
    "IB4010",
    "IS1008a",
    "IS1008c",
    "TS3004a",
    "TS3004c",
    "ES2011b",
    "ES2011d",
    "IB4002",
    "IB4004",
    "IB4011",
    "IS1008b",
    "IS1008d",
    "TS3004b",
    "TS3004d",
]

_EVAL_SAMPLE_IDS = [
    "EN2002a",
    "EN2002b",
    "EN2002c",
    "EN2002d",
    "ES2004a",
    "ES2004b",
    "ES2004c",
    "ES2004d",
    "IS1009a",
    "IS1009b",
    "IS1009c",
    "IS1009d",
    "TS3003a",
    "TS3003b",
    "TS3003c",
    "TS3003d",
]
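# The three lists above partition whole meetings, so every utterance of a given
# meeting lands in exactly one of the train/dev/eval splits.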

_SUBSETS = ("ihm",)

_BASE_DATA_URL = "https://huggingface.co/datasets/patrickvonplaten/ami-ihm-kaldi-chunked/resolve/main/"

_AUDIO_ARCHIVE_URL = _BASE_DATA_URL + "audio/{subset}/{split}/{_id}.tar.gz"

_ANNOTATIONS_ARCHIVE_URL = _BASE_DATA_URL + "annotations/{split}/text"
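# For example, the "ihm" train archive for meeting "EN2001a" resolves to
# ".../audio/ihm/train/EN2001a.tar.gz" and the train transcripts to
# ".../annotations/train/text".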

logger = datasets.utils.logging.get_logger(__name__)


class AMIConfig(datasets.BuilderConfig):
    """BuilderConfig for AMI."""

    def __init__(self, name, *args, **kwargs):
        """BuilderConfig for AMI"""
        super().__init__(name=name, *args, **kwargs)


class AMI(datasets.GeneratorBasedBuilder):
    """
    The AMI Meeting Corpus consists of 100 hours of meeting recordings, recorded in English using three
    different rooms with different acoustic properties and featuring mostly non-native speakers. This
    builder covers the individual headset microphone (IHM) recordings, chunked into one audio file per
    annotated utterance.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [AMIConfig(name=subset) for subset in _SUBSETS]

    DEFAULT_WRITER_BATCH_SIZE = 128
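    # a smaller-than-default writer batch size keeps RAM usage bounded during
    # generation, since each example carries a decoded audio array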

    def _info(self):
        features = datasets.Features(
            {
                "segment_id": datasets.Value("string"),
                "audio_id": datasets.Value("string"),
                "text": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16_000),
                "begin_time": datasets.Value("float32"),
                "end_time": datasets.Value("float32"),
                "microphone_id": datasets.Value("string"),
                "speaker_id": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # one chunked audio archive per meeting id; download_and_extract returns a
        # mapping from meeting id to the local path of the extracted archive
        train_audio_files = {
            m: _AUDIO_ARCHIVE_URL.format(subset=self.config.name, split="train", _id=m) for m in _TRAIN_SAMPLE_IDS
        }
        dev_audio_files = {
            m: _AUDIO_ARCHIVE_URL.format(subset=self.config.name, split="dev", _id=m) for m in _VALIDATION_SAMPLE_IDS
        }
        eval_audio_files = {
            m: _AUDIO_ARCHIVE_URL.format(subset=self.config.name, split="eval", _id=m) for m in _EVAL_SAMPLE_IDS
        }

        train_audio_archives = dl_manager.download_and_extract(train_audio_files)
        dev_audio_archives = dl_manager.download_and_extract(dev_audio_files)
        eval_audio_archives = dl_manager.download_and_extract(eval_audio_files)

        # each annotation file is plain text with one "<utterance_id> <transcript>" pair per line
        train_annotation = dl_manager.download_and_extract(_ANNOTATIONS_ARCHIVE_URL.format(split="train"))
        dev_annotation = dl_manager.download_and_extract(_ANNOTATIONS_ARCHIVE_URL.format(split="dev"))
        eval_annotation = dl_manager.download_and_extract(_ANNOTATIONS_ARCHIVE_URL.format(split="eval"))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"audio": train_audio_archives, "annotation": train_annotation, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"audio": dev_audio_archives, "annotation": dev_annotation, "split": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"audio": eval_audio_archives, "annotation": eval_annotation, "split": "eval"},
            ),
        ]

    def _generate_examples(self, audio, annotation, split):
        # parse the annotation file: one "<utterance_id> <transcript>" pair per line
        with open(annotation, "r", encoding="utf-8") as f:
            transcriptions = {}
            for line in f:
                line_items = line.strip().split()
                _id = line_items[0]
                text = " ".join(line_items[1:])
                # utterance ids follow the pattern "AMI_{meeting}_{microphone}_{speaker}_{begin}_{end}"
                _, segment_id, microphone_id, speaker_id, begin_time, end_time = _id.split("_")

                transcriptions[_id] = {
                    "audio_id": _id,
                    "segment_id": segment_id,
                    "text": text,
                    # timestamps are encoded in 1/100 s units; convert to seconds
                    "begin_time": int(begin_time) / 100,
                    "end_time": int(end_time) / 100,
                    "microphone_id": microphone_id,
                    "speaker_id": speaker_id,
                }

        for _audio_id, (transcription_id, result) in enumerate(transcriptions.items()):
            # each meeting's archive extracts to a directory named after the meeting id,
            # holding one wav per utterance named "{split}_{utterance_id.lower()}.wav"
            folder_id = result["segment_id"]
            file_name = "_".join([split, transcription_id.lower()]) + ".wav"
            audio_file = os.path.join(audio[folder_id], folder_id, file_name)
            result["audio"] = audio_file
            yield _audio_id, result
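# A local smoke test before committing could look like this (a sketch; it assumes
# the file is saved as ami.py in the working directory):
#
#     from datasets import load_dataset
#
#     ami_val = load_dataset("./ami.py", "ihm", split="validation")
#     print(ami_val[0]["audio_id"], ami_val[0]["text"])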