Sreyan88 committed
Commit 71081db
1 Parent(s): 3276161

Upload librispeech_asr.py

Files changed (1):
  1. librispeech_asr.py (+272, -0)
librispeech_asr.py ADDED
@@ -0,0 +1,272 @@
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Librispeech automatic speech recognition dataset."""


import os

import datasets
from datasets.tasks import AutomaticSpeechRecognition


_CITATION = """\
@inproceedings{panayotov2015librispeech,
  title={Librispeech: an ASR corpus based on public domain audio books},
  author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
  booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},
  pages={5206--5210},
  year={2015},
  organization={IEEE}
}
"""

_DESCRIPTION = """\
LibriSpeech is a corpus of approximately 1000 hours of read English speech with sampling rate of 16 kHz,
prepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read
audiobooks from the LibriVox project, and has been carefully segmented and aligned.
"""

_URL = "http://www.openslr.org/12"
_DL_URL = "http://www.openslr.org/resources/12/"


_DL_URLS = {
    "clean": {
        "dev": _DL_URL + "dev-clean.tar.gz",
        "test": _DL_URL + "test-clean.tar.gz",
        "train.100": _DL_URL + "train-clean-100.tar.gz",
    },
    "other": {
        "test": _DL_URL + "test-other.tar.gz",
        "dev": _DL_URL + "dev-other.tar.gz",
        "train.500": _DL_URL + "train-other-500.tar.gz",
    },
    "all": {
        "dev.clean": _DL_URL + "dev-clean.tar.gz",
        "dev.other": _DL_URL + "dev-other.tar.gz",
        "test.clean": _DL_URL + "test-clean.tar.gz",
        "test.other": _DL_URL + "test-other.tar.gz",
        "train.clean.100": _DL_URL + "train-clean-100.tar.gz",
        "train.clean.360": _DL_URL + "train-clean-360.tar.gz",
        "train.other.500": _DL_URL + "train-other-500.tar.gz",
    },
}


class LibrispeechASRConfig(datasets.BuilderConfig):
    """BuilderConfig for LibriSpeechASR."""

    def __init__(self, **kwargs):
        """
        Args:
            **kwargs: keyword arguments forwarded to super, e.g. `name`,
                `description`, or `data_dir` (the path to the folder containing
                the files from the downloaded .tar). Note that `citation` and
                `url` are not accepted by `datasets.BuilderConfig`; the config
                version is pinned to 2.1.0 below.
        """
        super(LibrispeechASRConfig, self).__init__(version=datasets.Version("2.1.0", ""), **kwargs)


class LibrispeechASR(datasets.GeneratorBasedBuilder):
    """Librispeech dataset."""

    DEFAULT_WRITER_BATCH_SIZE = 256
    DEFAULT_CONFIG_NAME = "all"
    BUILDER_CONFIGS = [
        LibrispeechASRConfig(name="clean", description="'Clean' speech."),
        LibrispeechASRConfig(name="other", description="'Other', more challenging, speech."),
        LibrispeechASRConfig(name="all", description="Combined clean and other dataset."),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                    "speaker_id": datasets.Value("int64"),
                    "chapter_id": datasets.Value("int64"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=("file", "text"),
            homepage=_URL,
            citation=_CITATION,
            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
        )

    def _split_generators(self, dl_manager):
        archive_path = dl_manager.download(_DL_URLS[self.config.name])
        # (Optional) In non-streaming mode, we can extract the archive locally to have actual local audio files:
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}

        if self.config.name == "clean":
            train_splits = [
                datasets.SplitGenerator(
                    name="train.100",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.100"),
                        "files": dl_manager.iter_archive(archive_path["train.100"]),
                    },
                ),
            ]
            dev_splits = [
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("dev"),
                        "files": dl_manager.iter_archive(archive_path["dev"]),
                    },
                )
            ]
            test_splits = [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("test"),
                        "files": dl_manager.iter_archive(archive_path["test"]),
                    },
                )
            ]
        elif self.config.name == "other":
            train_splits = [
                datasets.SplitGenerator(
                    name="train.500",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.500"),
                        "files": dl_manager.iter_archive(archive_path["train.500"]),
                    },
                )
            ]
            dev_splits = [
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("dev"),
                        "files": dl_manager.iter_archive(archive_path["dev"]),
                    },
                )
            ]
            test_splits = [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("test"),
                        "files": dl_manager.iter_archive(archive_path["test"]),
                    },
                )
            ]
        elif self.config.name == "all":
            train_splits = [
                datasets.SplitGenerator(
                    name="train.clean.100",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.clean.100"),
                        "files": dl_manager.iter_archive(archive_path["train.clean.100"]),
                    },
                ),
                datasets.SplitGenerator(
                    name="train.clean.360",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.clean.360"),
                        "files": dl_manager.iter_archive(archive_path["train.clean.360"]),
                    },
                ),
                datasets.SplitGenerator(
                    name="train.other.500",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.other.500"),
                        "files": dl_manager.iter_archive(archive_path["train.other.500"]),
                    },
                ),
            ]
            dev_splits = [
                datasets.SplitGenerator(
                    name="validation.clean",
                    gen_kwargs={
                        # Keys of `local_extracted_archive` mirror `_DL_URLS["all"]`, so "dev.clean" here, not "validation.clean".
                        "local_extracted_archive": local_extracted_archive.get("dev.clean"),
                        "files": dl_manager.iter_archive(archive_path["dev.clean"]),
                    },
                ),
                datasets.SplitGenerator(
                    name="validation.other",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("dev.other"),
                        "files": dl_manager.iter_archive(archive_path["dev.other"]),
                    },
                ),
            ]
            test_splits = [
                datasets.SplitGenerator(
                    name="test.clean",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("test.clean"),
                        "files": dl_manager.iter_archive(archive_path["test.clean"]),
                    },
                ),
                datasets.SplitGenerator(
                    name="test.other",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("test.other"),
                        "files": dl_manager.iter_archive(archive_path["test.other"]),
                    },
                ),
            ]

        return train_splits + dev_splits + test_splits

    def _generate_examples(self, files, local_extracted_archive):
        """Generate examples from a LibriSpeech archive_path."""
        key = 0
        audio_data = {}
        transcripts = []
        for path, f in files:
            if path.endswith(".flac"):
                # Buffer raw audio bytes keyed by utterance id until the
                # matching chapter transcript has been seen.
                id_ = path.split("/")[-1][: -len(".flac")]
                audio_data[id_] = f.read()
            elif path.endswith(".trans.txt"):
                # Each .trans.txt line is "<utterance-id> <transcript>".
                for line in f:
                    if line:
                        line = line.decode("utf-8").strip()
                        id_, transcript = line.split(" ", 1)
                        audio_file = f"{id_}.flac"
                        speaker_id, chapter_id = [int(el) for el in id_.split("-")[:2]]
                        audio_file = (
                            os.path.join(local_extracted_archive, audio_file)
                            if local_extracted_archive
                            else audio_file
                        )
                        transcripts.append(
                            {
                                "id": id_,
                                "speaker_id": speaker_id,
                                "chapter_id": chapter_id,
                                "file": audio_file,
                                "text": transcript,
                            }
                        )
            # Once every buffered audio file has a transcript (i.e. a full
            # chapter has been read), emit the examples and reset the buffers.
            if audio_data and len(audio_data) == len(transcripts):
                for transcript in transcripts:
                    audio = {"path": transcript["file"], "bytes": audio_data[transcript["id"]]}
                    yield key, {"audio": audio, **transcript}
                    key += 1
                audio_data = {}
                transcripts = []
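
Note: with this script uploaded, the dataset can be loaded through the 🤗 Datasets library. A minimal usage sketch follows; the local script path is an illustrative assumption, while the config and split names ("clean", "train.100") come from the code above:

    from datasets import load_dataset

    # "clean" config, 100h training split, as defined in _split_generators.
    # The path "./librispeech_asr.py" is hypothetical; point it at this file.
    ds = load_dataset("./librispeech_asr.py", "clean", split="train.100")

    # Each example carries the features declared in _info():
    # file, audio (16 kHz), text, speaker_id, chapter_id, id.
    print(ds[0]["text"])

    # Streaming mode skips local extraction and reads directly from the
    # archives, matching the dl_manager.is_streaming branch above.
    ds_stream = load_dataset("./librispeech_asr.py", "clean", split="train.100", streaming=True)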