sanchit-gandhi HF staff committed on
Commit
89d79c0
1 Parent(s): ec52119
Files changed (1) hide show
  1. peoples_speech-clean.py +25 -3
peoples_speech-clean.py CHANGED
@@ -72,6 +72,10 @@ _N_SHARDS_URL = _BASE_URL + "n_shards.json"
72
  # relative path to metadata inside dataset's repo
73
  _MANIFEST_URL = _BASE_URL + "{split}/{config}.json"
74
 
 
 
 
 
75
 
76
  class PeoplesSpeechConfig(datasets.BuilderConfig):
77
 
@@ -104,6 +108,7 @@ class PeoplesSpeech(datasets.GeneratorBasedBuilder):
104
  "audio": datasets.Audio(sampling_rate=16_000),
105
  "duration_ms": datasets.Value("int32"),
106
  "text": datasets.Value("string"),
 
107
  }
108
  ),
109
  task_templates=[AutomaticSpeechRecognition()],
@@ -113,6 +118,8 @@ class PeoplesSpeech(datasets.GeneratorBasedBuilder):
113
  )
114
 
115
  def _split_generators(self, dl_manager):
 
 
116
 
117
  if self.config.name == "microset":
118
  # take only first data archive for demo purposes
@@ -157,13 +164,16 @@ class PeoplesSpeech(datasets.GeneratorBasedBuilder):
157
  # In non-streaming mode, we extract the archives to have the data locally:
158
  local_extracted_archive_paths = dl_manager.extract(audio_archive_paths) \
159
  if not dl_manager.is_streaming else \
160
- {split: [None] * len(audio_archive_paths) for split in splits_to_configs}
161
 
162
  manifest_urls = {
163
  split: _MANIFEST_URL.format(split=split, config=config) for split, config in splits_to_configs.items()
164
  }
165
  manifest_paths = dl_manager.download_and_extract(manifest_urls)
166
 
 
 
 
167
  # To access the audio data from the TAR archives using the download manager,
168
  # we have to use the dl_manager.iter_archive method
169
  #
@@ -188,13 +198,14 @@ class PeoplesSpeech(datasets.GeneratorBasedBuilder):
188
  # use iter_archive here to access the files in the TAR archives:
189
  "archives": [dl_manager.iter_archive(path) for path in audio_archive_paths[split]],
190
  "manifest_path": manifest_paths[split],
 
191
  }
192
  )
193
  )
194
 
195
  return split_generators
196
 
197
- def _generate_examples(self, local_extracted_archive_paths, archives, manifest_path):
198
  meta = dict()
199
  with open(manifest_path, "r", encoding="utf-8") as f:
200
  for line in tqdm(f, desc="reading metadata file"):
@@ -211,6 +222,15 @@ class PeoplesSpeech(datasets.GeneratorBasedBuilder):
211
  "duration_ms": duration
212
  }
213
 
 
 
 
 
 
 
 
 
 
214
  for local_extracted_archive_path, archive in zip(local_extracted_archive_paths, archives):
215
  # Here we iterate over all the files within the TAR archive:
216
  for audio_filename, audio_file in archive:
@@ -223,5 +243,7 @@ class PeoplesSpeech(datasets.GeneratorBasedBuilder):
223
  "id": audio_filename,
224
  "audio": {"path": path, "bytes": audio_file.read()},
225
  "text": meta[audio_filename]["text"],
226
- "duration_ms": meta[audio_filename]["duration_ms"]
 
227
  }
 
 
72
  # relative path to metadata inside dataset's repo
73
  _MANIFEST_URL = _BASE_URL + "{split}/{config}.json"
74
 
75
+ _WHISPER_TRANSCRIPT_URL = "https://huggingface.co/datasets/distil-whisper/peoples_speech-clean/resolve/main/transcription_data/greedy_search/"
76
+
77
+ _WHISPER_TRANSCRIPT_URLs = _WHISPER_TRANSCRIPT_URL + "{split}-transcription.txt"
78
+
79
 
80
  class PeoplesSpeechConfig(datasets.BuilderConfig):
81
 
 
108
  "audio": datasets.Audio(sampling_rate=16_000),
109
  "duration_ms": datasets.Value("int32"),
110
  "text": datasets.Value("string"),
111
+ "whisper_transcript": datasets.Value("string"),
112
  }
113
  ),
114
  task_templates=[AutomaticSpeechRecognition()],
 
118
  )
119
 
120
  def _split_generators(self, dl_manager):
121
+ if self.config.name not in ["clean", "validation", "test"]:
122
+ raise ValueError("This dataset is only compatible with the `clean`, `validation` and `test` configs.")
123
 
124
  if self.config.name == "microset":
125
  # take only first data archive for demo purposes
 
164
  # In non-streaming mode, we extract the archives to have the data locally:
165
  local_extracted_archive_paths = dl_manager.extract(audio_archive_paths) \
166
  if not dl_manager.is_streaming else \
167
+ {split: [None] * len(audio_archive_paths[split]) for split in splits_to_configs}
168
 
169
  manifest_urls = {
170
  split: _MANIFEST_URL.format(split=split, config=config) for split, config in splits_to_configs.items()
171
  }
172
  manifest_paths = dl_manager.download_and_extract(manifest_urls)
173
 
174
+ transcription_urls = {split: _WHISPER_TRANSCRIPT_URLs.format(split=split) for split in splits_to_configs}
175
+ transcript_archive_path = dl_manager.download(transcription_urls)
176
+
177
  # To access the audio data from the TAR archives using the download manager,
178
  # we have to use the dl_manager.iter_archive method
179
  #
 
198
  # use iter_archive here to access the files in the TAR archives:
199
  "archives": [dl_manager.iter_archive(path) for path in audio_archive_paths[split]],
200
  "manifest_path": manifest_paths[split],
201
+ "whisper_transcript": transcript_archive_path[split],
202
  }
203
  )
204
  )
205
 
206
  return split_generators
207
 
208
+ def _generate_examples(self, local_extracted_archive_paths, archives, manifest_path, whisper_transcript):
209
  meta = dict()
210
  with open(manifest_path, "r", encoding="utf-8") as f:
211
  for line in tqdm(f, desc="reading metadata file"):
 
222
  "duration_ms": duration
223
  }
224
 
225
+ whisper_transcripts = []
226
+
227
+ with open(whisper_transcript, encoding="utf-8") as f:
228
+ for row in f:
229
+ whisper_transcripts.append(row.rstrip("\n"))
230
+
231
+ idx = 0
232
+
233
+
234
  for local_extracted_archive_path, archive in zip(local_extracted_archive_paths, archives):
235
  # Here we iterate over all the files within the TAR archive:
236
  for audio_filename, audio_file in archive:
 
243
  "id": audio_filename,
244
  "audio": {"path": path, "bytes": audio_file.read()},
245
  "text": meta[audio_filename]["text"],
246
+ "duration_ms": meta[audio_filename]["duration_ms"],
247
+ "whisper_transcript": whisper_transcripts[idx],
248
  }
249
+ idx += 1