Failed to load dataset
I tried to load this dataset with load_dataset("allenai/OLMoE-mix-0924", split="train[:2048]"). This fails with:
TypeError: Couldn't cast array of type
struct<paloma_paragraphs: list<item: null>, paloma_documents: list<item: null>>
to
{'paloma_paragraphs': Sequence(feature=Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), length=-1, id=None)}
There's some indication that this is an issue with the dataset itself:
- https://github.com/huggingface/datasets/issues/5596
- https://huggingface.co/datasets/EleutherAI/pile/discussions/11
Full error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
File /usr/local/lib/python3.10/dist-packages/datasets/builder.py:2013, in ArrowBasedBuilder._prepare_split_single(self, gen_kwargs, fpath, file_format, max_shard_size, job_id)
2012 try:
-> 2013 writer.write_table(table)
2014 except CastError as cast_error:
File /usr/local/lib/python3.10/dist-packages/datasets/arrow_writer.py:585, in ArrowWriter.write_table(self, pa_table, writer_batch_size)
584 pa_table = pa_table.combine_chunks()
--> 585 pa_table = table_cast(pa_table, self._schema)
586 if self.embed_local_files:
File /usr/local/lib/python3.10/dist-packages/datasets/table.py:2281, in table_cast(table, schema)
2280 if table.schema != schema:
-> 2281 return cast_table_to_schema(table, schema)
2282 elif table.schema.metadata != schema.metadata:
File /usr/local/lib/python3.10/dist-packages/datasets/table.py:2240, in cast_table_to_schema(table, schema)
2235 raise CastError(
2236 f"Couldn't cast\n{_short_str(table.schema)}\nto\n{_short_str(features)}\nbecause column names don't match",
2237 table_column_names=table.column_names,
2238 requested_column_names=list(features),
2239 )
-> 2240 arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
2241 return pa.Table.from_arrays(arrays, schema=schema)
File /usr/local/lib/python3.10/dist-packages/datasets/table.py:2240, in <listcomp>(.0)
2235 raise CastError(
2236 f"Couldn't cast\n{_short_str(table.schema)}\nto\n{_short_str(features)}\nbecause column names don't match",
2237 table_column_names=table.column_names,
2238 requested_column_names=list(features),
2239 )
-> 2240 arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
2241 return pa.Table.from_arrays(arrays, schema=schema)
File /usr/local/lib/python3.10/dist-packages/datasets/table.py:1795, in _wrap_for_chunked_arrays.<locals>.wrapper(array, *args, **kwargs)
1794 if isinstance(array, pa.ChunkedArray):
-> 1795 return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
1796 else:
File /usr/local/lib/python3.10/dist-packages/datasets/table.py:1795, in <listcomp>(.0)
1794 if isinstance(array, pa.ChunkedArray):
-> 1795 return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
1796 else:
File /usr/local/lib/python3.10/dist-packages/datasets/table.py:2104, in cast_array_to_feature(array, feature, allow_primitive_to_str, allow_decimal_to_str)
2098 return array_cast(
2099 array,
2100 feature(),
2101 allow_primitive_to_str=allow_primitive_to_str,
2102 allow_decimal_to_str=allow_decimal_to_str,
2103 )
-> 2104 raise TypeError(f"Couldn't cast array of type\n{_short_str(array.type)}\nto\n{_short_str(feature)}")
TypeError: Couldn't cast array of type
struct<paloma_paragraphs: list<item: null>, paloma_documents: list<item: null>>
to
{'paloma_paragraphs': Sequence(feature=Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), length=-1, id=None)}
The above exception was the direct cause of the following exception:
DatasetGenerationError Traceback (most recent call last)
Cell In[15], line 1
----> 1 load_dataset("allenai/OLMoE-mix-0924", split="train[:2048]")
File /usr/local/lib/python3.10/dist-packages/datasets/load.py:2628, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, token, use_auth_token, task, streaming, num_proc, storage_options, trust_remote_code, **config_kwargs)
2625 return builder_instance.as_streaming_dataset(split=split)
2627 # Download and prepare data
-> 2628 builder_instance.download_and_prepare(
2629 download_config=download_config,
2630 download_mode=download_mode,
2631 verification_mode=verification_mode,
2632 num_proc=num_proc,
2633 storage_options=storage_options,
2634 )
2636 # Build dataset for splits
2637 keep_in_memory = (
2638 keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
2639 )
File /usr/local/lib/python3.10/dist-packages/datasets/builder.py:1029, in DatasetBuilder.download_and_prepare(self, output_dir, download_config, download_mode, verification_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)
1027 if num_proc is not None:
1028 prepare_split_kwargs["num_proc"] = num_proc
-> 1029 self._download_and_prepare(
1030 dl_manager=dl_manager,
1031 verification_mode=verification_mode,
1032 **prepare_split_kwargs,
1033 **download_and_prepare_kwargs,
1034 )
1035 # Sync info
1036 self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())
File /usr/local/lib/python3.10/dist-packages/datasets/builder.py:1124, in DatasetBuilder._download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs)
1120 split_dict.add(split_generator.split_info)
1122 try:
1123 # Prepare split will record examples associated to the split
-> 1124 self._prepare_split(split_generator, **prepare_split_kwargs)
1125 except OSError as e:
1126 raise OSError(
1127 "Cannot find data file. "
1128 + (self.manual_download_instructions or "")
1129 + "\nOriginal error:\n"
1130 + str(e)
1131 ) from None
File /usr/local/lib/python3.10/dist-packages/datasets/builder.py:1884, in ArrowBasedBuilder._prepare_split(self, split_generator, file_format, num_proc, max_shard_size)
1882 job_id = 0
1883 with pbar:
-> 1884 for job_id, done, content in self._prepare_split_single(
1885 gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
1886 ):
1887 if done:
1888 result = content
File /usr/local/lib/python3.10/dist-packages/datasets/builder.py:2040, in ArrowBasedBuilder._prepare_split_single(self, gen_kwargs, fpath, file_format, max_shard_size, job_id)
2038 if isinstance(e, DatasetGenerationError):
2039 raise
-> 2040 raise DatasetGenerationError("An error occurred while generating the dataset") from e
2042 yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
DatasetGenerationError: An error occurred while generating the dataset
In case it helps others, I seem to be able to stream the dataset with load_dataset("allenai/OLMoE-mix-0924", split="train", streaming=True).
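For anyone who only needs a small prefix like train[:2048], here is a minimal sketch of pulling it from the stream (assuming a datasets version where IterableDataset supports take):

from datasets import load_dataset

# Stream the dataset instead of materializing it, which avoids the
# up-front schema cast that fails above.
ds = load_dataset("allenai/OLMoE-mix-0924", split="train", streaming=True)

# Take the first 2048 examples, mirroring split="train[:2048]".
first_2048 = list(ds.take(2048))
print(len(first_2048))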
Hi! Yes, datasets expects the fields of each example to have the same subfields and types, but in this dataset some samples have additional fields and subfields.
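To make that concrete, here is a minimal sketch with made-up data that reproduces the same failure mode: the second file's attributes struct carries an extra subfield, so it cannot be cast to the schema inferred from the first file.

import json, os, tempfile
from datasets import load_dataset

tmp = tempfile.mkdtemp()

# First file: attributes has only a paloma_paragraphs subfield.
with open(os.path.join(tmp, "a.jsonl"), "w") as f:
    f.write(json.dumps({"text": "x", "attributes": {"paloma_paragraphs": [[0, 5]]}}) + "\n")

# Second file: attributes gains an extra paloma_documents subfield.
with open(os.path.join(tmp, "b.jsonl"), "w") as f:
    f.write(json.dumps({"text": "y", "attributes": {"paloma_paragraphs": [], "paloma_documents": []}}) + "\n")

# Expected to fail with a cast error like the one above, because the
# struct subfields differ between the two files.
load_dataset("json", data_files=[os.path.join(tmp, "a.jsonl"), os.path.join(tmp, "b.jsonl")])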
I opened a PR to this dataset to fix the issue with a workaround: it declares the string type in the YAML for the fields that cause problems.
The PR is here: https://huggingface.co/datasets/allenai/OLMoE-mix-0924/discussions/4
Merged!!
Hi there, I ran into a similar but slightly different issue from the one in this thread, even on the latest version. I got:
Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/datasets/builder.py", line 1870, in _prepare_split_single
writer.write_table(table)
File "/usr/local/lib/python3.10/dist-packages/datasets/arrow_writer.py", line 622, in write_table
pa_table = table_cast(pa_table, self._schema)
File "/usr/local/lib/python3.10/dist-packages/datasets/table.py", line 2292, in table_cast
return cast_table_to_schema(table, schema)
File "/usr/local/lib/python3.10/dist-packages/datasets/table.py", line 2245, in cast_table_to_schema
arrays = [
File "/usr/local/lib/python3.10/dist-packages/datasets/table.py", line 2246, in
cast_array_to_feature(
File "/usr/local/lib/python3.10/dist-packages/datasets/table.py", line 1795, in wrapper
return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
File "/usr/local/lib/python3.10/dist-packages/datasets/table.py", line 1795, in
return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
File "/usr/local/lib/python3.10/dist-packages/datasets/table.py", line 2102, in cast_array_to_feature
return array_cast(
File "/usr/local/lib/python3.10/dist-packages/datasets/table.py", line 1797, in wrapper
return func(array, *args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/datasets/table.py", line 1950, in array_cast
raise TypeError(f"Couldn't cast array of type {_short_str(array.type)} to {_short_str(pa_type)}")
TypeError: Couldn't cast array of type struct<paloma_paragraphs: list<item: list<item: int64>>> to string
May I ask if you guys could reopen this thread and take a look at this issue? Thanks!
@ziyueliu12138 I will take a look at this and get back to you.
@ziyueliu12138 could you try now?
I got the following error (I also tried re-downloading the dataset, but it gave me the same error):
Generating train split: 0 examples [00:00, ? examples/s]
Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/datasets/builder.py", line 1870, in _prepare_split_single
writer.write_table(table)
File "/usr/local/lib/python3.10/dist-packages/datasets/arrow_writer.py", line 622, in write_table
pa_table = table_cast(pa_table, self._schema)
File "/usr/local/lib/python3.10/dist-packages/datasets/table.py", line 2292, in table_cast
return cast_table_to_schema(table, schema)
File "/usr/local/lib/python3.10/dist-packages/datasets/table.py", line 2240, in cast_table_to_schema
raise CastError(
datasets.table.CastError: Couldn't cast
added: string
attributes: struct<paloma_paragraphs: list<item: list<item: int64>>>
child 0, paloma_paragraphs: list<item: list<item: int64>>
child 0, item: list<item: int64>
child 0, item: int64
created: string
doc: struct<arxiv_id: string, language: string, timestamp: timestamp[s], url: string, yymm: string>
child 0, arxiv_id: string
child 1, language: string
child 2, timestamp: timestamp[s]
child 3, url: string
child 4, yymm: string
id: string
metadata: struct<provenance: string>
child 0, provenance: string
text: string
to
{'id': Value(dtype='string', id=None), 'text': Value(dtype='string', id=None), 'added': Value(dtype='string', id=None), 'created': Value(dtype='string', id=None)}
because column names don't match
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "", line 1, in
File "/usr/local/lib/python3.10/dist-packages/datasets/load.py", line 2154, in load_dataset
builder_instance.download_and_prepare(
File "/usr/local/lib/python3.10/dist-packages/datasets/builder.py", line 924, in download_and_prepare
self._download_and_prepare(
File "/usr/local/lib/python3.10/dist-packages/datasets/builder.py", line 1000, in _download_and_prepare
self._prepare_split(split_generator, **prepare_split_kwargs)
File "/usr/local/lib/python3.10/dist-packages/datasets/builder.py", line 1741, in _prepare_split
for job_id, done, content in self._prepare_split_single(
File "/usr/local/lib/python3.10/dist-packages/datasets/builder.py", line 1872, in _prepare_split_single
raise DatasetGenerationCastError.from_cast_error(
datasets.exceptions.DatasetGenerationCastError: An error occurred while generating the dataset
All the data files must have the same columns, but at some point there are 3 new columns ({'attributes', 'doc', 'metadata'})
This happened while the json dataset builder was generating data using
/datasets/.cache/huggingface/hub/datasets--allenai--OLMoE-mix-0924/snapshots/1e44595eaffc7491dfab23947ea4d5a62b33aff3/data/algebraic-stack/algebraic-stack-train-0000.json.gz
Please either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)
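If you want to check what a shard actually contains, a quick diagnostic sketch is to read its first record directly (path copied from the error above; adjust to your cache location):

import gzip, json

path = ("/datasets/.cache/huggingface/hub/datasets--allenai--OLMoE-mix-0924/"
        "snapshots/1e44595eaffc7491dfab23947ea4d5a62b33aff3/"
        "data/algebraic-stack/algebraic-stack-train-0000.json.gz")
with gzip.open(path, "rt") as f:
    record = json.loads(f.readline())

# Shows which top-level columns this shard has, compared with the
# {'id', 'text', 'added', 'created'} schema the error expects.
print(sorted(record.keys()))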