---
annotations_creators:
- crowdsourced
- machine-generated
language_creators:
- crowdsourced
- machine-generated
language:
- en
license:
- cc-by-2.0
- cc-by-2.5
- cc-by-3.0
- cc-by-4.0
- cc-by-sa-3.0
- cc-by-sa-4.0
multilinguality:
- monolingual
size_categories:
- 1T<n
source_datasets:
- original
task_categories:
- automatic-speech-recognition
task_ids: []
pretty_name: People's Speech
tags:
- robust-speech-recognition
- noisy-speech-recognition
- speech-recognition
dataset_info:
- config_name: clean_sa
  features:
  - name: id
    dtype: string
  - name: audio
    dtype:
      audio:
        sampling_rate: 16000
  - name: duration_ms
    dtype: int32
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 75267509124.558
    num_examples: 257093
  - name: validation
    num_bytes: 2075929254.254
    num_examples: 18622
  - name: test
    num_bytes: 3894954757.41
    num_examples: 34898
  download_size: 72518549222
  dataset_size: 81238393136.222
- config_name: default
  features:
  - name: id
    dtype: string
  - name: audio
    dtype: audio
  - name: duration_ms
    dtype: int32
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 401733771193.124
    num_examples: 1501271
  - name: validation
    num_bytes: 2459781412.24
    num_examples: 18622
  - name: test
    num_bytes: 4324307722.96
    num_examples: 34898
  download_size: 398570070831
  dataset_size: 408517860328.32404
- config_name: dirty
  features:
  - name: id
    dtype: string
  - name: audio
    dtype:
      audio:
        sampling_rate: 16000
  - name: duration_ms
    dtype: int32
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 1569500875399.994
    num_examples: 5476898
  - name: validation
    num_bytes: 2641406179.2539997
    num_examples: 18622
  - name: test
    num_bytes: 5097236056.41
    num_examples: 34898
  download_size: 1496747948260
  dataset_size: 1577239517635.6577
- config_name: dirty_sa
  features:
  - name: id
    dtype: string
  - name: audio
    dtype:
      audio:
        sampling_rate: 16000
  - name: duration_ms
    dtype: int32
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 163776914241.91
    num_examples: 548014
  - name: validation
    num_bytes: 2075929254.254
    num_examples: 18622
  - name: test
    num_bytes: 3894954757.41
    num_examples: 34898
  download_size: 149326092074
  dataset_size: 169747798253.574
- config_name: test
  features:
  - name: id
    dtype: string
  - name: audio
    dtype:
      audio:
        sampling_rate: 16000
  - name: duration_ms
    dtype: int32
  - name: text
    dtype: string
  splits:
  - name: test
    num_bytes: 3894954757.41
    num_examples: 34898
  download_size: 4087772459
  dataset_size: 3894954757.41
- config_name: validation
  features:
  - name: id
    dtype: string
  - name: audio
    dtype:
      audio:
        sampling_rate: 16000
  - name: duration_ms
    dtype: int32
  - name: text
    dtype: string
  splits:
  - name: validation
    num_bytes: 2075929254.254
    num_examples: 18622
  download_size: 2335244149
  dataset_size: 2075929254.254
configs:
- config_name: clean_sa
  data_files:
  - split: train
    path: clean_sa/train-*
  - split: validation
    path: clean_sa/validation-*
  - split: test
    path: clean_sa/test-*
- config_name: default
  data_files:
  - split: train
    path: data/train-*
  - split: validation
    path: data/validation-*
  - split: test
    path: data/test-*
- config_name: dirty
  data_files:
  - split: train
    path: dirty/train-*
  - split: validation
    path: dirty/validation-*
  - split: test
    path: dirty/test-*
- config_name: dirty_sa
  data_files:
  - split: train
    path: dirty_sa/train-*
  - split: validation
    path: dirty_sa/validation-*
  - split: test
    path: dirty_sa/test-*
- config_name: test
  data_files:
  - split: test
    path: test/test-*
- config_name: validation
  data_files:
  - split: validation
    path: validation/validation-*
---

# Dataset Card for People's Speech

## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)

## Dataset Description

- **Homepage:** https://mlcommons.org/en/peoples-speech/
- **Repository:** https://github.com/mlcommons/peoples-speech
- **Paper:** https://arxiv.org/abs/2111.09344
- **Leaderboard:** [Needs More Information]
- **Point of Contact:** [datasets@mlcommons.org](mailto:datasets@mlcommons.org)

### Dataset Summary

The People's Speech Dataset is among the world's largest English speech recognition corpora licensed for academic and commercial usage under CC-BY-SA and CC-BY 4.0. It includes 30,000+ hours of transcribed English speech from a diverse set of speakers. This open dataset is large enough to train speech-to-text systems and, crucially, is available under a permissive license.

### Supported Tasks and Leaderboards

[Needs More Information]

### Languages

English

## Dataset Structure

### Data Instances

{
    "id": "gov_DOT_uscourts_DOT_scotus_DOT_19-161/gov_DOT_uscourts_DOT_scotus_DOT_19-161_DOT_2020-03-02_DOT_mp3_00002.flac",
    "audio": {
        "path": "gov_DOT_uscourts_DOT_scotus_DOT_19-161/gov_DOT_uscourts_DOT_scotus_DOT_19-161_DOT_2020-03-02_DOT_mp3_00002.flac",
        "array": array([-6.10351562e-05, ...]),
        "sampling_rate": 16000
    },
    "duration_ms": 14490,
    "text": "contends that the suspension clause requires a [...]"
}

### Data Fields

{
    "id": datasets.Value("string"),
    "audio": datasets.Audio(sampling_rate=16_000),
    "duration_ms": datasets.Value("int32"),
    "text": datasets.Value("string"),
}
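
As a minimal sketch of how these fields surface through the `datasets` library (the `MLCommons/peoples_speech` repository id is an assumption here; substitute the actual Hub id of this card if it differs):

```python
from datasets import load_dataset

# Assumed repository id -- adjust if this card is hosted under a different name.
ds = load_dataset("MLCommons/peoples_speech", "validation", split="validation")

sample = ds[0]
print(sample["id"])                      # source-derived identifier (string)
print(sample["duration_ms"])             # clip length in milliseconds (int32)
print(sample["text"])                    # transcript (string)
print(sample["audio"]["sampling_rate"])  # 16000
print(sample["audio"]["array"][:10])     # decoded waveform as a float array
```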

### Data Splits

We provide the following configurations for the dataset: `default` (clean), `clean_sa`, `dirty`, and `dirty_sa`, corresponding to the CC-BY/CC-BY-SA license subsets and the clean/dirty transcript-quality subsets, plus standalone `validation` and `test` configurations. Each of the four main configurations provides `train`, `validation`, and `test` splits.
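
Because the larger configurations run to hundreds of gigabytes or more, a reasonable pattern is to pick a configuration and stream it rather than downloading it up front. A hedged sketch, again assuming the `MLCommons/peoples_speech` repository id:

```python
from datasets import load_dataset

# Stream the training split of one configuration so nothing is downloaded up front.
# The repository id is an assumption; substitute the actual Hub id if it differs.
stream = load_dataset(
    "MLCommons/peoples_speech",
    "dirty_sa",
    split="train",
    streaming=True,
)

# Peek at a few examples without materializing the whole split.
for example in stream.take(3):
    print(example["id"], example["duration_ms"], example["text"][:60])
```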

## Dataset Creation

### Curation Rationale

See our [paper](https://arxiv.org/abs/2111.09344).

### Source Data

#### Initial Data Collection and Normalization

Data was downloaded via the archive.org API. No data inference was done.

#### Who are the source language producers?

[Needs More Information]

### Annotations

#### Annotation process

No manual annotation is done. We download only source audio with already existing transcripts.

#### Who are the annotators?

For the test and dev sets, we paid native speakers of American English to produce transcriptions. We do not know the identities of the transcriptionists for data in the training set; we have noticed that some training-set transcriptions are likely the output of automatic speech recognition systems.

### Personal and Sensitive Information

Several of our sources are legal and government proceedings, spoken histories, speeches, and so on. Given that these were intended as public documents and licensed as such, it is reasonable to assume that the individuals involved were aware their speech would be made public.

## Considerations for Using the Data

### Social Impact of Dataset

The dataset could be used for speech synthesis. However, this requires careful cleaning of the dataset, as background noise is not tolerable for speech synthesis.

The dataset could be used for keyword spotting tasks as well. In particular, this is a good use case for the non-English audio in the dataset.

Our sincere hope is that the large breadth of sources our dataset incorporates reduces existing quality-of-service issues, such as speech recognition systems' poor understanding of non-native English accents. We cannot think of any unfair treatment that could come from using this dataset at this time.


### Discussion of Biases

Our data is downloaded from archive.org. As such, the data is biased towards whatever users decide to upload there.

Almost all of our data is American-accented English.

### Other Known Limitations

As of version 1.0, a portion of the data in the training, test, and dev sets is poorly aligned: some words appear in the transcript but not in the audio, and some words appear in the audio but not in the transcript. We are working on fixing these alignment issues.

## Additional Information

### Dataset Curators

[Needs More Information]

### Licensing Information

We provide CC-BY and CC-BY-SA subsets of the dataset.

### Citation Information

Please cite:

```
@article{DBLP:journals/corr/abs-2111-09344,
  author    = {Daniel Galvez and
               Greg Diamos and
               Juan Ciro and
               Juan Felipe Cer{\'{o}}n and
               Keith Achorn and
               Anjali Gopi and
               David Kanter and
               Maximilian Lam and
               Mark Mazumder and
               Vijay Janapa Reddi},
  title     = {The People's Speech: {A} Large-Scale Diverse English Speech Recognition
               Dataset for Commercial Usage},
  journal   = {CoRR},
  volume    = {abs/2111.09344},
  year      = {2021},
  url       = {https://arxiv.org/abs/2111.09344},
  eprinttype = {arXiv},
  eprint    = {2111.09344},
  timestamp = {Mon, 22 Nov 2021 16:44:07 +0100},
  biburl    = {https://dblp.org/rec/journals/corr/abs-2111-09344.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
```