Update librispeech_asr_test.py
Browse files- librispeech_asr_test.py +28 -41
librispeech_asr_test.py
CHANGED
@@ -16,9 +16,13 @@
|
|
16 |
# Lint as: python3
|
17 |
"""Librispeech automatic speech recognition dataset."""
|
18 |
|
|
|
|
|
|
|
|
|
19 |
|
20 |
import datasets
|
21 |
-
|
22 |
|
23 |
|
24 |
_CITATION = """\
|
@@ -98,51 +102,34 @@ class LibrispeechASR(datasets.GeneratorBasedBuilder):
|
|
98 |
"id": datasets.Value("string"),
|
99 |
}
|
100 |
),
|
101 |
-
supervised_keys=("
|
102 |
homepage=_URL,
|
103 |
citation=_CITATION,
|
104 |
-
task_templates=[AutomaticSpeechRecognition(audio_file_path_column="file", transcription_column="text")],
|
105 |
)
|
106 |
|
107 |
def _split_generators(self, dl_manager):
|
108 |
-
archive_path = dl_manager.
|
109 |
-
|
110 |
-
|
111 |
-
return [
|
112 |
-
datasets.SplitGenerator(
|
113 |
-
name=datasets.Split.TEST, gen_kwargs={"files": dl_manager.iter_archive(archive_path["test"])}
|
114 |
-
),
|
115 |
]
|
116 |
|
117 |
-
def _generate_examples(self,
|
118 |
-
"""Generate examples from a
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
if path.endswith(".flac"):
|
124 |
-
id_ = path.split("/")[-1][: -len(".flac")]
|
125 |
-
audio_data[id_] = f.read()
|
126 |
-
elif path.endswith(".trans.txt"):
|
127 |
for line in f:
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
-
|
132 |
-
|
133 |
-
|
134 |
-
|
135 |
-
|
136 |
-
|
137 |
-
|
138 |
-
|
139 |
-
|
140 |
-
|
141 |
-
)
|
142 |
-
if audio_data and len(audio_data) == len(transcripts):
|
143 |
-
for transcript in transcripts:
|
144 |
-
audio = {"path": transcript["file"], "bytes": audio_data[transcript["id"]]}
|
145 |
-
yield key, {"audio": audio, **transcript}
|
146 |
-
key += 1
|
147 |
-
audio_data = {}
|
148 |
-
transcripts = []
|
|
|
16 |
# Lint as: python3
|
17 |
"""Librispeech automatic speech recognition dataset."""
|
18 |
|
19 |
+
from __future__ import absolute_import, division, print_function
|
20 |
+
|
21 |
+
import glob
|
22 |
+
import os
|
23 |
|
24 |
import datasets
|
25 |
+
|
26 |
|
27 |
|
28 |
_CITATION = """\
|
|
|
102 |
"id": datasets.Value("string"),
|
103 |
}
|
104 |
),
|
105 |
+
supervised_keys=("speech", "text"),
|
106 |
homepage=_URL,
|
107 |
citation=_CITATION,
|
|
|
108 |
)
|
109 |
|
110 |
def _split_generators(self, dl_manager):
    """Download/extract the archives for this config and declare the splits."""
    extracted_paths = dl_manager.download_and_extract(_DL_URLS[self.config.name])
    dev_split = datasets.SplitGenerator(
        name=datasets.Split.VALIDATION,
        gen_kwargs={
            "archive_path": extracted_paths["dev"],
            "split_name": f"dev_{self.config.name}",
        },
    )
    return [dev_split]
|
115 |
|
116 |
+
def _generate_examples(self, archive_path, split_name):
|
117 |
+
"""Generate examples from a Librispeech archive_path."""
|
118 |
+
transcripts_glob = os.path.join(archive_path, split_name, "*/*/*.txt")
|
119 |
+
for transcript_file in glob.glob(transcripts_glob):
|
120 |
+
path = os.path.dirname(transcript_file)
|
121 |
+
with open(os.path.join(path, transcript_file)) as f:
|
|
|
|
|
|
|
|
|
122 |
for line in f:
|
123 |
+
line = line.strip()
|
124 |
+
key, transcript = line.split(" ", 1)
|
125 |
+
audio_file = f"{key}.flac"
|
126 |
+
speaker_id, chapter_id = [int(el) for el in key.split("-")[:2]]
|
127 |
+
example = {
|
128 |
+
"id": key,
|
129 |
+
"speaker_id": speaker_id,
|
130 |
+
"chapter_id": chapter_id,
|
131 |
+
"file": os.path.join(path, audio_file),
|
132 |
+
"audio": os.path.join(path, audio_file),
|
133 |
+
"text": transcript,
|
134 |
+
}
|
135 |
+
yield key, example
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|