bhigy committed on
Commit
2b6f1c4
1 Parent(s): 2d7620b

bugfix - runtime error "sharding is ambiguous"

Browse files

When trying to load the dataset with the split "microphone-single", I
get the following error:
```
RuntimeError: Sharding is ambiguous for this dataset: we found several data sources lists of different lengths, and we don't know over which list we should parallelize:
- key samples_paths has length 134
- key ids has length 136
- key verification_ids has length 134
To fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.
```

This is due to `ids` still containing the missing files for that split.

Files changed (1) hide show
  1. ami.py +4 -4
ami.py CHANGED
@@ -371,10 +371,10 @@ class AMI(datasets.GeneratorBasedBuilder):
371
  annotation_path = dl_manager.download_and_extract(_DL_URL_ANNOTATIONS)
372
 
373
  # train
374
- train_files = [path.format(_id, _id) for _id in _TRAIN_SAMPLE_IDS for path in self.config.dl_path_formats]
375
- train_files = list(
376
- filter(lambda f: f.split("/")[-1].split(".")[0] not in self.config.missing_files, train_files)
377
  )
 
378
  train_ids = [f.split("/")[-1].split(".")[0] for f in train_files]
379
  train_path = dl_manager.download_and_extract(train_files)
380
 
@@ -396,7 +396,7 @@ class AMI(datasets.GeneratorBasedBuilder):
396
  gen_kwargs={
397
  "annotation_path": annotation_path,
398
  "samples_paths": train_path,
399
- "ids": _TRAIN_SAMPLE_IDS,
400
  "verification_ids": train_ids,
401
  },
402
  ),
 
371
  annotation_path = dl_manager.download_and_extract(_DL_URL_ANNOTATIONS)
372
 
373
  # train
374
+ train_sample_ids = list(
375
+ filter(lambda f: f not in self.config.missing_files, _TRAIN_SAMPLE_IDS)
 
376
  )
377
+ train_files = [path.format(_id, _id) for _id in train_sample_ids for path in self.config.dl_path_formats]
378
  train_ids = [f.split("/")[-1].split(".")[0] for f in train_files]
379
  train_path = dl_manager.download_and_extract(train_files)
380
 
 
396
  gen_kwargs={
397
  "annotation_path": annotation_path,
398
  "samples_paths": train_path,
399
+ "ids": train_sample_ids,
400
  "verification_ids": train_ids,
401
  },
402
  ),