lhoestq (HF staff) committed
Commit 38fe0f1
Parent: b710cca

Update dataset script


I removed the import of `xopen`, which is not necessary, and simplified the script to use the same code style as the other audio datasets.

cc @polinaeterna @anton-l
This should fix https://huggingface.co/datasets/facebook/multilingual_librispeech/discussions/2
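
For anyone who wants to double-check, here is a minimal sketch of the streaming path that the linked discussion reports as broken. It assumes the `"german"` config; any of the dataset's language configs should behave the same way:

```python
from datasets import load_dataset

# Streaming no longer goes through `xopen`; the audio archives are iterated
# with `dl_manager.iter_archive` instead, so nothing is downloaded up front.
mls = load_dataset(
    "facebook/multilingual_librispeech", "german",
    split="train", streaming=True,
)

# Pull one example to confirm that transcripts and audio bytes stream correctly.
sample = next(iter(mls))
print(sample["id"], sample["text"])
```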

Files changed (1)
  1. multilingual_librispeech.py +66 -107
multilingual_librispeech.py CHANGED
@@ -16,12 +16,9 @@
 # Lint as: python3
 """Multilingual Librispeech automatic speech recognition dataset."""
 
-
-from functools import partial
 import os
 
 import datasets
-from datasets.utils.streaming_download_manager import xopen
 
 
 _CITATION = """\
@@ -62,7 +59,7 @@ class MultilingualLibrispeechConfig(datasets.BuilderConfig):
             version=datasets.Version("2.1.0", ""), name=name, **kwargs
         )
         # relative path to full data inside a repo (for example `data/mls_german`)
-        self.data_root_dir = _DL_URL_FORMAT.format(name=name)
+        self.data_root_url = _DL_URL_FORMAT.format(name=name)
 
 
 class MultilingualLibrispeech(datasets.GeneratorBasedBuilder):
@@ -98,46 +95,68 @@ class MultilingualLibrispeech(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        download_kwargs = {
-            "dl_manager": dl_manager,
-            "root_dir": self.config.data_root_dir
-        }
-        download_transcript = partial(
-            download_extract_transcript, **download_kwargs
-        )
-        download_audio_non_streaming = partial(
-            download_extract_audio_archives, **download_kwargs
-        )
-        download_audio_streaming = partial(
-            download_audio_archives, **download_kwargs
-        )
-        download_limited_ids = partial(
-            download_extract_limited_ids, **download_kwargs
+
+        transcripts = dl_manager.download({
+            "train": self.config.data_root_url + "/train/transcripts.txt",
+            "dev": self.config.data_root_url + "/dev/transcripts.txt",
+            "test": self.config.data_root_url + "/test/transcripts.txt",
+        })
+
+        # Download handles.txt files containing ids for limited supervision train sets
+        limited_supervision_9h = dl_manager.download(
+            [self.config.data_root_url + "/train/limited_supervision/9hr/handles.txt"],
         )
-
-        train_kwargs = {
-            "transcript_path": download_transcript(split="train"),
-            "audio_archives": download_audio_streaming(split="train"),
-            "local_audio_archives_paths": download_audio_non_streaming(split="train")
-            if not dl_manager.is_streaming else None
-        }
+        # in our case of 1 hour limited supervision ("train.1h") there are always 6 subfolders like:
+        # "limited_supervision/1h/0/handles.txt", "limited_supervision/1h/1/handles.txt", ...
+        limited_supervision_1h = dl_manager.download([
+            self.config.data_root_url + f"/train/limited_supervision/1hr/{i}/handles.txt" for i in range(6)
+        ])
+
+        # each split contains many .tar.gz archives with its audio files
+        # audio_filenames.txt contains the names of these archives
+        audio_filenames_paths = dl_manager.download({
+            "train": self.config.data_root_url + "/train/audio_filenames.txt",
+            "dev": self.config.data_root_url + "/dev/audio_filenames.txt",
+            "test": self.config.data_root_url + "/test/audio_filenames.txt",
+        })
+
+        audio_archives = {}
+        for split in audio_filenames_paths:
+            with open(audio_filenames_paths[split], encoding="utf-8") as f:
+                audio_filenames = [line.strip() for line in f.readlines()]
+            audio_archives[split] = dl_manager.download([
+                self.config.data_root_url + "/" + split + "/audio/" + filename
+                for filename in audio_filenames
+            ])
+
+        # (Optional) In non-streaming mode, we can extract the archives locally to have actual local audio files:
+        local_extracted_archives = dl_manager.extract(audio_archives) if not dl_manager.is_streaming else {}
 
         train_splits = [
             datasets.SplitGenerator(
-                name=datasets.Split.TRAIN, gen_kwargs=train_kwargs
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "transcript_path": transcripts["train"],
+                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["train"]],
+                    "local_extracted_archive": local_extracted_archives.get("train"),
+                }
            ),
             datasets.SplitGenerator(
                 name="train.9h",
                 gen_kwargs={
-                    **train_kwargs,
-                    "limited_ids_paths": download_limited_ids(sub_folder="limited_supervision/9hr"),
+                    "transcript_path": transcripts["train"],
+                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["train"]],
+                    "local_extracted_archive": local_extracted_archives.get("train"),
+                    "limited_ids_paths": limited_supervision_9h,
                 },
             ),
             datasets.SplitGenerator(
                 name="train.1h",
                 gen_kwargs={
-                    **train_kwargs,
-                    "limited_ids_paths": download_limited_ids(sub_folder="limited_supervision/1hr"),
+                    "transcript_path": transcripts["train"],
+                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["train"]],
+                    "local_extracted_archive": local_extracted_archives.get("train"),
                    "limited_ids_paths": limited_supervision_1h,
                 },
             ),
         ]
@@ -145,23 +164,21 @@ class MultilingualLibrispeech(datasets.GeneratorBasedBuilder):
         return train_splits + [
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION, gen_kwargs={
-                    "transcript_path": download_transcript(split="dev"),
-                    "audio_archives": download_audio_streaming(split="dev"),
-                    "local_audio_archives_paths": download_audio_non_streaming(split="dev")
-                    if not dl_manager.is_streaming else None
+                    "transcript_path": transcripts["dev"],
+                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["dev"]],
+                    "local_extracted_archive": local_extracted_archives.get("dev"),
                 }
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST, gen_kwargs={
-                    "transcript_path": download_transcript(split="test"),
-                    "audio_archives": download_audio_streaming(split="test"),
-                    "local_audio_archives_paths": download_audio_non_streaming(split="test")
-                    if not dl_manager.is_streaming else None
+                    "transcript_path": transcripts["test"],
+                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_archives["test"]],
+                    "local_extracted_archive": local_extracted_archives.get("test"),
                 }
             ),
         ]
 
-    def _generate_examples(self, transcript_path, audio_archives, local_audio_archives_paths, limited_ids_paths=None):
+    def _generate_examples(self, transcript_path, audio_archives, local_extracted_archive, limited_ids_paths=None):
         """Generate examples from a Multilingual LibriSpeech data dir."""
         transcripts = dict()
         with open(transcript_path, "r", encoding="utf-8") as file:
@@ -192,76 +209,18 @@ class MultilingualLibrispeech(datasets.GeneratorBasedBuilder):
                     # this only can be true in limited supervision sets ("train.9h" and "train.1h")
                     continue
 
-                path = os.path.join(local_audio_archives_paths[archive_idx], audio_filename)\
-                    if local_audio_archives_paths else audio_filename
+                local_audio_file_path = os.path.join(
+                    local_extracted_archive[archive_idx], audio_filename
+                ) if local_extracted_archive else None
+
                 yield audio_filename, {
-                    "file": path if local_audio_archives_paths else None,
-                    "audio": {"path": path, "bytes": file.read()},
+                    "file": local_audio_file_path,
+                    "audio": {
+                        "path": local_audio_file_path if local_audio_file_path else audio_filename,
+                        "bytes": file.read()
+                    },
                     "text": audio_transcript,
                     "speaker_id": speaker_id,
                     "chapter_id": chapter_id,
                     "id": audio_id
                 }
-
-
-def download_extract_limited_ids(dl_manager, root_dir, sub_folder):
-    """Download handles.txt files containing ids for limited supervision train sets. """
-
-    sub_path = os.path.join(root_dir, "train", sub_folder)
-
-    if sub_folder.endswith("9hr"):
-        limited_ids_paths = [os.path.join(sub_path, "handles.txt")]
-    else:  # => sub_folder.endswith("1hr")
-        # in case of 1 hour limited supervision ("train.1h") there are always 6 subfolders like:
-        # "limited_supervision/1h/0/handles.txt", "limited_supervision/1h/1/handles.txt", ...
-        limited_ids_paths = [os.path.join(sub_path, str(i), "handles.txt") for i in range(6)]
-
-    limited_ids_paths = dl_manager.download(limited_ids_paths)
-
-    return limited_ids_paths
-
-
-def download_extract_transcript(dl_manager, root_dir, split):
-    """
-    Download file with audio transcriptions.
-
-    Return:
-        path (str): path to locally extracted `transcripts.txt` file
-    """
-    transcript_path = os.path.join(root_dir, split, "transcripts.txt")
-    return dl_manager.download(transcript_path)
-
-
-def download_audio_archive_paths(dl_manager, root_dir, split):
-    # each split contains many .tar.gz archives with its audio files
-    # audio_filenames.txt contains the names of these archives
-    split_dir = os.path.join(root_dir, split)
-    audio_filenames_path = dl_manager.download(os.path.join(split_dir, "audio_filenames.txt"))
-
-    with open(audio_filenames_path, "r", encoding="utf-8") as file:
-        audio_filenames = [line.strip() for line in file.readlines()]
-
-    return dl_manager.download([os.path.join(split_dir, "audio", filename) for filename in audio_filenames])
-
-
-# for non-streaming case
-def download_extract_audio_archives(dl_manager, root_dir, split):
-    """
-    Download and extract audio archives locally.
-
-    Return:
-        archive_paths (List `str`): paths to locally extracted archives
-    """
-    archive_paths = download_audio_archive_paths(dl_manager, root_dir, split)
-    return [dl_manager.extract(archive_path) for archive_path in archive_paths]
-
-
-# for streaming case
-def download_audio_archives(dl_manager, root_dir, split):
-    """Prepare archives with audio files for iterating over them.
-
-    Return:
-        audio_archives (List `Generator`): list of generators to iterate over files in each audio archive.
-    """
-    archive_paths = download_audio_archive_paths(dl_manager, root_dir, split)
-    return [dl_manager.iter_archive(archive_path) for archive_path in archive_paths]
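
With the new `local_extracted_archive` handling, non-streaming mode still yields real local paths in `file`, while streaming mode yields `file=None` and reads the audio bytes straight out of the `.tar.gz` members. A sketch of both modes (again assuming the `"german"` config):

```python
from datasets import load_dataset

# Non-streaming: archives are downloaded and extracted up front, so "file"
# is a real local path under the datasets cache.
mls = load_dataset("facebook/multilingual_librispeech", "german", split="test")
print(mls[0]["file"])

# Streaming: nothing is extracted; "file" is None and the audio bytes are
# read from inside the archive as it is iterated.
mls_stream = load_dataset(
    "facebook/multilingual_librispeech", "german",
    split="test", streaming=True,
)
first = next(iter(mls_stream))
print(first["file"], first["audio"]["path"])
```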