system HF staff committed on
Commit
cf4a40f
1 Parent(s): c91fac7

Update files from the datasets library (from 1.16.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (1) hide show
  1. ami.py +4 -7
ami.py CHANGED
@@ -361,12 +361,12 @@ class AMI(datasets.GeneratorBasedBuilder):
361
  def _split_generators(self, dl_manager):
362
 
363
  # multi-processing doesn't work for AMI
364
- if hasattr(dl_manager, "_download_config") and dl_manager._download_config.num_proc != 1:
365
  logger.warning(
366
  "AMI corpus cannot be downloaded using multi-processing. "
367
  "Setting number of downloaded processes `num_proc` to 1. "
368
  )
369
- dl_manager._download_config.num_proc = 1
370
 
371
  annotation_path = dl_manager.download_and_extract(_DL_URL_ANNOTATIONS)
372
 
@@ -512,7 +512,7 @@ class AMI(datasets.GeneratorBasedBuilder):
512
 
513
  # words
514
  words_paths = {
515
- _id: [os.path.join(annotation_path, "words/{}.{}.words.xml".format(_id, speaker)) for speaker in _SPEAKERS]
516
  for _id in ids
517
  }
518
  words_paths = {_id: list(filter(lambda path: os.path.isfile(path), words_paths[_id])) for _id in ids}
@@ -520,10 +520,7 @@ class AMI(datasets.GeneratorBasedBuilder):
520
 
521
  # segments
522
  segments_paths = {
523
- _id: [
524
- os.path.join(annotation_path, "segments/{}.{}.segments.xml".format(_id, speaker))
525
- for speaker in _SPEAKERS
526
- ]
527
  for _id in ids
528
  }
529
  segments_paths = {_id: list(filter(lambda path: os.path.isfile(path), segments_paths[_id])) for _id in ids}
 
361
  def _split_generators(self, dl_manager):
362
 
363
  # multi-processing doesn't work for AMI
364
+ if hasattr(dl_manager, "download_config") and dl_manager.download_config.num_proc != 1:
365
  logger.warning(
366
  "AMI corpus cannot be downloaded using multi-processing. "
367
  "Setting number of downloaded processes `num_proc` to 1. "
368
  )
369
+ dl_manager.download_config.num_proc = 1
370
 
371
  annotation_path = dl_manager.download_and_extract(_DL_URL_ANNOTATIONS)
372
 
 
512
 
513
  # words
514
  words_paths = {
515
+ _id: [os.path.join(annotation_path, f"words/{_id}.{speaker}.words.xml") for speaker in _SPEAKERS]
516
  for _id in ids
517
  }
518
  words_paths = {_id: list(filter(lambda path: os.path.isfile(path), words_paths[_id])) for _id in ids}
 
520
 
521
  # segments
522
  segments_paths = {
523
+ _id: [os.path.join(annotation_path, f"segments/{_id}.{speaker}.segments.xml") for speaker in _SPEAKERS]
 
 
 
524
  for _id in ids
525
  }
526
  segments_paths = {_id: list(filter(lambda path: os.path.isfile(path), segments_paths[_id])) for _id in ids}