kresnik committed on
Commit b601548
1 Parent(s): dce9e16

Upload librispeech_asr_test.py

Files changed (1)
  1. librispeech_asr_test.py +152 -0
librispeech_asr_test.py ADDED
@@ -0,0 +1,152 @@
+ # coding=utf-8
+ # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """Librispeech automatic speech recognition dataset."""
+
+
+ import datasets
+ from datasets.tasks import AutomaticSpeechRecognition
+
+
+ _CITATION = """\
+ @inproceedings{panayotov2015librispeech,
+     title={Librispeech: an ASR corpus based on public domain audio books},
+     author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
+     booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},
+     pages={5206--5210},
+     year={2015},
+     organization={IEEE}
+ }
+ """
+
+ _DESCRIPTION = """\
+ LibriSpeech is a corpus of approximately 1000 hours of read English speech with a sampling rate of 16 kHz,
+ prepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read
+ audiobooks from the LibriVox project, and has been carefully segmented and aligned.
+ Note that in order to limit the required storage for preparing this dataset, the audio
+ is stored in the .flac format and is not converted to a float32 array. To convert the audio
+ file to a float32 array, please make use of the `.map()` function as follows:
+ ```python
+ import soundfile as sf
+ def map_to_array(batch):
+     speech_array, _ = sf.read(batch["file"])
+     batch["speech"] = speech_array
+     return batch
+ dataset = dataset.map(map_to_array, remove_columns=["file"])
+ ```
+ """
+
+ _URL = "http://www.openslr.org/12"
+ _DL_URL = "http://www.openslr.org/resources/12/"
+
+ _DL_URLS = {
+     "clean": {
+         "test": _DL_URL + "test-clean.tar.gz",
+     },
+     "other": {
+         "test": _DL_URL + "test-other.tar.gz",
+     },
+ }
+
+
+ class LibrispeechASRConfig(datasets.BuilderConfig):
+     """BuilderConfig for LibriSpeechASR."""
+
+     def __init__(self, **kwargs):
+         """
+         Args:
+           data_dir: `string`, the path to the folder containing the files in the
+             downloaded .tar
+           citation: `string`, citation for the data set
+           url: `string`, url for information about the data set
+           **kwargs: keyword arguments forwarded to super.
+         """
+         super(LibrispeechASRConfig, self).__init__(version=datasets.Version("2.1.0", ""), **kwargs)
+
+
+ class LibrispeechASR(datasets.GeneratorBasedBuilder):
+     """Librispeech dataset."""
+
+     DEFAULT_WRITER_BATCH_SIZE = 256
+     BUILDER_CONFIGS = [
+         LibrispeechASRConfig(name="clean", description="'Clean' speech."),
+         LibrispeechASRConfig(name="other", description="'Other', more challenging, speech."),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "file": datasets.Value("string"),
+                     "audio": datasets.Audio(sampling_rate=16_000),
+                     "text": datasets.Value("string"),
+                     "speaker_id": datasets.Value("int64"),
+                     "chapter_id": datasets.Value("int64"),
+                     "id": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=("file", "text"),
+             homepage=_URL,
+             citation=_CITATION,
+             task_templates=[AutomaticSpeechRecognition(audio_file_path_column="file", transcription_column="text")],
+         )
+
+     def _split_generators(self, dl_manager):
+         archive_path = dl_manager.download(_DL_URLS[self.config.name])
+
+         # _DL_URLS only defines a "test" archive per config, so only a test split is generated;
+         # a "dev" lookup here would raise a KeyError.
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST, gen_kwargs={"files": dl_manager.iter_archive(archive_path["test"])}
+             ),
+         ]
+
+     def _generate_examples(self, files):
+         """Generate examples from a LibriSpeech archive_path."""
+         key = 0
+         audio_data = {}
+         transcripts = []
+         for path, f in files:
+             if path.endswith(".flac"):
+                 id_ = path.split("/")[-1][: -len(".flac")]
+                 audio_data[id_] = f.read()
+             elif path.endswith(".trans.txt"):
+                 for line in f:
+                     if line:
+                         line = line.decode("utf-8").strip()
+                         id_, transcript = line.split(" ", 1)
+                         audio_file = f"{id_}.flac"
+                         speaker_id, chapter_id = [int(el) for el in id_.split("-")[:2]]
+                         transcripts.append(
+                             {
+                                 "id": id_,
+                                 "speaker_id": speaker_id,
+                                 "chapter_id": chapter_id,
+                                 "file": audio_file,
+                                 "text": transcript,
+                             }
+                         )
+             # Once every buffered .flac file has a matching transcript entry,
+             # emit the examples for this chapter and reset the buffers.
+             if audio_data and len(audio_data) == len(transcripts):
+                 for transcript in transcripts:
+                     audio = {"path": transcript["file"], "bytes": audio_data[transcript["id"]]}
+                     yield key, {"audio": audio, **transcript}
+                     key += 1
+                 audio_data = {}
+                 transcripts = []
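
For reference, a minimal usage sketch of the uploaded script. It assumes the file is available locally as `librispeech_asr_test.py` and that the installed `datasets` version still supports script-based loaders; the variable names are illustrative only.

```python
from datasets import load_dataset

# "clean" and "other" are the two configs defined in BUILDER_CONFIGS;
# this script only exposes a test split for each.
test_clean = load_dataset("librispeech_asr_test.py", "clean", split="test")

sample = test_clean[0]
# The Audio feature decodes the stored flac bytes into a float array on access.
waveform = sample["audio"]["array"]
sampling_rate = sample["audio"]["sampling_rate"]
print(sample["id"], sample["text"], waveform.shape, sampling_rate)
```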