polinaeterna committed
Commit: c8869d5
Parent: ee802ec

refactor split_generator

Files changed (1):
  1. ami.py  +30 -18
ami.py CHANGED
@@ -263,6 +263,12 @@ _EVAL_SAMPLE_IDS = [
     "TS3003d",
 ]
 
+_SAMPLE_IDS = {
+    "train": _TRAIN_SAMPLE_IDS,
+    "dev": _VALIDATION_SAMPLE_IDS,
+    "eval": _EVAL_SAMPLE_IDS,
+}
+
 _SUBSETS = ("ihm",)
 
 _BASE_DATA_URL = "https://huggingface.co/datasets/edinburghcstr/ami/resolve/main/"
@@ -314,43 +320,49 @@ class AMI(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        train_audio_files = {m: _AUDIO_ARCHIVE_URL.format(subset=self.config.name, split="train", _id=m) for m in _TRAIN_SAMPLE_IDS}
-        dev_audio_files = {m: _AUDIO_ARCHIVE_URL.format(subset=self.config.name, split="dev", _id=m) for m in _VALIDATION_SAMPLE_IDS}
-        eval_audio_files = {m: _AUDIO_ARCHIVE_URL.format(subset=self.config.name, split="eval", _id=m) for m in _EVAL_SAMPLE_IDS}
-
-        train_audio_archives = dl_manager.download(train_audio_files)
-        dev_audio_archives = dl_manager.download(dev_audio_files)
-        eval_audio_archives = dl_manager.download(eval_audio_files)
-
-        train_annotation = dl_manager.download_and_extract(_ANNOTATIONS_ARCHIVE_URL.format(split="train"))
-        dev_annotation = dl_manager.download_and_extract(_ANNOTATIONS_ARCHIVE_URL.format(split="dev"))
-        eval_annotation = dl_manager.download_and_extract(_ANNOTATIONS_ARCHIVE_URL.format(split="eval"))
+        splits = ["train", "dev", "eval"]
+
+        audio_archives_urls = {}
+        for split in splits:
+            audio_archives_urls[split] = {
+                m: _AUDIO_ARCHIVE_URL.format(subset=self.config.name, split=split, _id=m) for m in _SAMPLE_IDS[split]
+            }
+        audio_archives = dl_manager.download(audio_archives_urls)
+        local_extracted_archives_paths = dl_manager.extract(audio_archives) if not dl_manager.is_streaming else {
+            split: [None] * len(audio_archives[split]) for split in splits
+        }
+
+        annotations_urls = {split: _ANNOTATIONS_ARCHIVE_URL.format(split=split) for split in splits}
+        annotations = dl_manager.download(annotations_urls)
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "audio_archives": [dl_manager.iter_archive(archive) for archive in train_audio_archives.values()],
-                    "local_extracted_archives_paths": dl_manager.extract(train_audio_archives).values() if not dl_manager.is_streaming else [None] * len(train_audio_archives),
-                    "annotation": train_annotation,
+                    "audio_archives": [dl_manager.iter_archive(archive) for archive in
+                                       audio_archives["train"].values()],
+                    "local_extracted_archives_paths": local_extracted_archives_paths["train"],
+                    "annotation": annotations["train"],
                     "split": "train"
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "audio_archives": [dl_manager.iter_archive(archive) for archive in dev_audio_archives.values()],
-                    "local_extracted_archives_paths": dl_manager.extract(dev_audio_archives).values() if not dl_manager.is_streaming else [None] * len(dev_audio_archives),
-                    "annotation": dev_annotation,
+                    "audio_archives": [dl_manager.iter_archive(archive) for archive in
+                                       audio_archives["dev"].values()],
+                    "local_extracted_archives_paths": local_extracted_archives_paths["dev"],
+                    "annotation": annotations["dev"],
                     "split": "dev"
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "audio_archives": [dl_manager.iter_archive(archive) for archive in eval_audio_archives.values()],
-                    "local_extracted_archives_paths": dl_manager.extract(eval_audio_archives).values() if not dl_manager.is_streaming else [None] * len(eval_audio_archives),
-                    "annotation": eval_annotation,
+                    "audio_archives": [dl_manager.iter_archive(archive) for archive in
+                                       audio_archives["eval"].values()],
+                    "local_extracted_archives_paths": local_extracted_archives_paths["eval"],
+                    "annotation": annotations["eval"],
                     "split": "eval"
                 },
             ),
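
The refactor leans on the fact that dl_manager.download and dl_manager.extract accept nested dictionaries of URLs and return results with the same structure, so a single call per operation can cover all three splits instead of one call per split. Below is a minimal usage sketch, not part of the commit, showing how a downstream user might consume the refactored loader; the exact load_dataset call and the printed keys are assumptions about typical use, and newer datasets releases may additionally require trust_remote_code=True for script-based datasets.

# Minimal usage sketch (assumption: standard datasets API; not part of this commit).
from datasets import load_dataset

# streaming=True exercises the is_streaming branch of _split_generators above,
# where archives are iterated with dl_manager.iter_archive instead of being
# extracted to local paths.
ami = load_dataset(
    "edinburghcstr/ami",
    "ihm",               # the only subset listed in _SUBSETS
    split="validation",  # built from the "dev" sample IDs
    streaming=True,
    # trust_remote_code=True,  # may be needed on newer datasets releases
)

first_example = next(iter(ami))
print(sorted(first_example.keys()))  # inspect the available feature names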