Error Loading Dataset

#13
by epinnock - opened

from datasets import load_dataset

# dataset_id and data_dir are defined in earlier notebook cells
dataset = load_dataset(dataset_id, data_dir=data_dir, use_auth_token=True)

Throws the following error:


KeyError Traceback (most recent call last)
Input In [13], in <cell line: 1>()
----> 1 dataset = load_dataset(dataset_id, data_dir=data_dir , use_auth_token=True)

File /usr/local/lib/python3.9/dist-packages/datasets/load.py:1679, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, **config_kwargs)
1676 try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES
1678 # Download and prepare data
-> 1679 builder_instance.download_and_prepare(
1680 download_config=download_config,
1681 download_mode=download_mode,
1682 ignore_verifications=ignore_verifications,
1683 try_from_hf_gcs=try_from_hf_gcs,
1684 use_auth_token=use_auth_token,
1685 )
1687 # Build dataset for splits
1688 keep_in_memory = (
1689 keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
1690 )

File /usr/local/lib/python3.9/dist-packages/datasets/builder.py:704, in DatasetBuilder.download_and_prepare(self, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, **download_and_prepare_kwargs)
702 logger.warning("HF google storage unreachable. Downloading and preparing it from source")
703 if not downloaded_from_gcs:
--> 704 self._download_and_prepare(
705 dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs
706 )
707 # Sync info
708 self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())

File /usr/local/lib/python3.9/dist-packages/datasets/builder.py:793, in DatasetBuilder._download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs)
789 split_dict.add(split_generator.split_info)
791 try:
792 # Prepare split will record examples associated to the split
--> 793 self._prepare_split(split_generator, **prepare_split_kwargs)
794 except OSError as e:
795 raise OSError(
796 "Cannot find data file. "
797 + (self.manual_download_instructions or "")
798 + "\nOriginal error:\n"
799 + str(e)
800 ) from None

File /usr/local/lib/python3.9/dist-packages/datasets/builder.py:1271, in ArrowBasedBuilder._prepare_split(self, split_generator)
1267 with ArrowWriter(features=self.info.features, path=fpath) as writer:
1268 for key, table in logging.tqdm(
1269 generator, unit=" tables", leave=False, disable=(not logging.is_progress_bar_enabled())
1270 ):
-> 1271 writer.write_table(table)
1272 num_examples, num_bytes = writer.finalize()
1274 split_generator.split_info.num_examples = num_examples

File /usr/local/lib/python3.9/dist-packages/datasets/arrow_writer.py:518, in ArrowWriter.write_table(self, pa_table, writer_batch_size)
516 writer_batch_size = self.writer_batch_size
517 if self.pa_writer is None:
--> 518 self._build_writer(inferred_schema=pa_table.schema)
519 pa_table = table_cast(pa_table, self._schema)
520 batches: List[pa.RecordBatch] = pa_table.to_batches(max_chunksize=writer_batch_size)

File /usr/local/lib/python3.9/dist-packages/datasets/arrow_writer.py:352, in ArrowWriter._build_writer(self, inferred_schema)
350 def _build_writer(self, inferred_schema: pa.Schema):
351 schema = self.schema
--> 352 inferred_features = Features.from_arrow_schema(inferred_schema)
353 if self._features is not None:
354 if self.update_features: # keep original features it they match, or update them

File /usr/local/lib/python3.9/dist-packages/datasets/features/features.py:1533, in Features.from_arrow_schema(cls, pa_schema)
1531 metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode())
1532 if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None:
-> 1533 return Features.from_dict(metadata["info"]["features"])
1534 obj = {field.name: generate_from_arrow_type(field.type) for field in pa_schema}
1535 return cls(**obj)

File /usr/local/lib/python3.9/dist-packages/datasets/features/features.py:1562, in Features.from_dict(cls, dic)
1537 @classmethod
1538 def from_dict(cls, dic) -> "Features":
1539 """
1540 Construct Features from dict.
1541
(...)
1560 {'_type': Value(dtype='string', id=None)}
1561 """
-> 1562 obj = generate_from_dict(dic)
1563 return cls(**obj)

File /usr/local/lib/python3.9/dist-packages/datasets/features/features.py:1263, in generate_from_dict(obj)
1261 # Otherwise we have a dict or a dataclass
1262 if "_type" not in obj or isinstance(obj["_type"], dict):
-> 1263 return {key: generate_from_dict(value) for key, value in obj.items()}
1264 class_type = globals()[obj.pop("_type")]
1266 if class_type == Sequence:

File /usr/local/lib/python3.9/dist-packages/datasets/features/features.py:1263, in <dictcomp>(.0)
1261 # Otherwise we have a dict or a dataclass
1262 if "_type" not in obj or isinstance(obj["_type"], dict):
-> 1263 return {key: generate_from_dict(value) for key, value in obj.items()}
1264 class_type = globals()[obj.pop("_type")]
1266 if class_type == Sequence:

File /usr/local/lib/python3.9/dist-packages/datasets/features/features.py:1267, in generate_from_dict(obj)
1264 class_type = globals()[obj.pop("_type")]
1266 if class_type == Sequence:
-> 1267 return Sequence(feature=generate_from_dict(obj["feature"]), length=obj["length"])
1269 field_names = {f.name for f in fields(class_type)}
1270 return class_type(**{k: v for k, v in obj.items() if k in field_names})

KeyError: 'length'
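
For context (this is my read of the traceback, not something confirmed about the dataset itself): the failing frame at features.py:1267 accesses obj["length"] directly when rebuilding a Sequence feature, so the KeyError suggests the feature metadata stored with the data contains a Sequence entry that has no "length" key. A minimal, hypothetical sketch that reproduces the same error on the datasets version shown in the traceback:

from datasets import Features

# Hypothetical feature metadata (not taken from the actual dataset): a
# Sequence entry whose "length" key is missing. In this datasets version,
# generate_from_dict builds Sequence(..., length=obj["length"]), so the
# missing key raises the same KeyError.
features_dict = {
    "tokens": {
        "_type": "Sequence",
        "feature": {"_type": "Value", "dtype": "string", "id": None},
        # "length": -1,  # omitting this key triggers KeyError: 'length'
    }
}

Features.from_dict(features_dict)  # KeyError: 'length' on affected versions

If that is what is happening here, the metadata was probably written by a different datasets version than the one reading it, so aligning the two versions (for example, upgrading datasets in the environment doing the loading) may be worth trying.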
