Casting Error Loading dataset (bigcode/the-stack-dedup)

#18
by amezasor - opened

I get the error "Couldn't cast ... because column names don't match" when loading the dataset.

This is the code:

import os
import datasets as ds

MY_CACHE_DIR = "/my/cache/dir/path"
MY_TOKEN = "my_token"

the_stack_ds = ds.load_dataset(
    "bigcode/the-stack-dedup",
    split="train",
    download_mode="reuse_cache_if_exists",
    cache_dir=MY_CACHE_DIR,
    use_auth_token=MY_TOKEN,
)

This is the error trace:

ValueError Traceback (most recent call last)
File ~/.local/lib/python3.8/site-packages/datasets/builder.py:1873, in ArrowBasedBuilder._prepare_split_single(self, gen_kwargs, fpath, file_format, max_shard_size, job_id)
1866 writer = writer_class(
1867 features=writer._features,
1868 path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
(...)
1871 embed_local_files=embed_local_files,
1872 )
-> 1873 writer.write_table(table)
1874 num_examples_progress_update += len(table)

File ~/.local/lib/python3.8/site-packages/datasets/arrow_writer.py:568, in ArrowWriter.write_table(self, pa_table, writer_batch_size)
567 pa_table = pa_table.combine_chunks()
--> 568 pa_table = table_cast(pa_table, self._schema)
569 if self.embed_local_files:

File ~/.local/lib/python3.8/site-packages/datasets/table.py:2290, in table_cast(table, schema)
2289 if table.schema != schema:
-> 2290 return cast_table_to_schema(table, schema)
2291 elif table.schema.metadata != schema.metadata:

File ~/.local/lib/python3.8/site-packages/datasets/table.py:2248, in cast_table_to_schema(table, schema)
2247 if sorted(table.column_names) != sorted(features):
-> 2248 raise ValueError(f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match")
2249 arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]

ValueError: Couldn't cast
hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list<item: string>
child 0, item: string
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list<item: string>
child 0, item: string
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list<item: string>
child 0, item: string
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: double
max_line_length: int64
alphanum_fraction: double
id: int64
-- schema metadata --
huggingface: '{"info": {"features": {"hexsha": {"dtype": "string", "_type' + 1979
to
{'hexsha': Value(dtype='string', id=None), 'size': Value(dtype='int64', id=None), 'ext': Value(dtype='string', id=None), 'lang': Value(dtype='string', id=None), 'max_stars_repo_path': Value(dtype='string', id=None), 'max_stars_repo_name': Value(dtype='string', id=None), 'max_stars_repo_head_hexsha': Value(dtype='string', id=None), 'max_stars_repo_licenses': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'max_stars_count': Value(dtype='int64', id=None), 'max_stars_repo_stars_event_min_datetime': Value(dtype='string', id=None), 'max_stars_repo_stars_event_max_datetime': Value(dtype='string', id=None), 'max_issues_repo_path': Value(dtype='string', id=None), 'max_issues_repo_name': Value(dtype='string', id=None), 'max_issues_repo_head_hexsha': Value(dtype='string', id=None), 'max_issues_repo_licenses': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'max_issues_count': Value(dtype='int64', id=None), 'max_issues_repo_issues_event_min_datetime': Value(dtype='string', id=None), 'max_issues_repo_issues_event_max_datetime': Value(dtype='string', id=None), 'max_forks_repo_path': Value(dtype='string', id=None), 'max_forks_repo_name': Value(dtype='string', id=None), 'max_forks_repo_head_hexsha': Value(dtype='string', id=None), 'max_forks_repo_licenses': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'max_forks_count': Value(dtype='int64', id=None), 'max_forks_repo_forks_event_min_datetime': Value(dtype='string', id=None), 'max_forks_repo_forks_event_max_datetime': Value(dtype='string', id=None), 'content': Value(dtype='string', id=None), 'avg_line_length': Value(dtype='float64', id=None), 'max_line_length': Value(dtype='int64', id=None), 'alphanum_fraction': Value(dtype='float64', id=None)}
because column names don't match

The above exception was the direct cause of the following exception:

DatasetGenerationError Traceback (most recent call last)
Cell In[3], line 7
1 # EXAMPLE: dataset = load_dataset(dataset_id, cache_dir="/cache/dir/path" , split="split", use_auth_token="yourTokenStr")
2
3 # The dataset has no splits and all data is loaded as train split by default.
4 # "bigcode/the-stack" does not work
5 # cache_dir (str, optional) – Directory to read/write data. Defaults to “~/datasets”.
----> 7 the_stack_ds = ds.load_dataset("bigcode/the-stack-dedup", split="train", download_mode="reuse_cache_if_exists", cache_dir="/git2skdir/cache_dir_stack", use_auth_token="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")

File ~/.local/lib/python3.8/site-packages/datasets/load.py:1797, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, num_proc, storage_options, **config_kwargs)
1794 try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES
1796 # Download and prepare data
-> 1797 builder_instance.download_and_prepare(
1798 download_config=download_config,
1799 download_mode=download_mode,
1800 verification_mode=verification_mode,
1801 try_from_hf_gcs=try_from_hf_gcs,
1802 num_proc=num_proc,
1803 storage_options=storage_options,
1804 )
1806 # Build dataset for splits
1807 keep_in_memory = (
1808 keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
1809 )

File ~/.local/lib/python3.8/site-packages/datasets/builder.py:890, in DatasetBuilder.download_and_prepare(self, output_dir, download_config, download_mode, verification_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)
888 if num_proc is not None:
889 prepare_split_kwargs["num_proc"] = num_proc
--> 890 self._download_and_prepare(
891 dl_manager=dl_manager,
892 verification_mode=verification_mode,
893 **prepare_split_kwargs,
894 **download_and_prepare_kwargs,
895 )
896 # Sync info
897 self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())

File ~/.local/lib/python3.8/site-packages/datasets/builder.py:985, in DatasetBuilder._download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs)
981 split_dict.add(split_generator.split_info)
983 try:
984 # Prepare split will record examples associated to the split
--> 985 self._prepare_split(split_generator, **prepare_split_kwargs)
986 except OSError as e:
987 raise OSError(
988 "Cannot find data file. "
989 + (self.manual_download_instructions or "")
990 + "\nOriginal error:\n"
991 + str(e)
992 ) from None

File ~/.local/lib/python3.8/site-packages/datasets/builder.py:1746, in ArrowBasedBuilder._prepare_split(self, split_generator, file_format, num_proc, max_shard_size)
1744 job_id = 0
1745 with pbar:
-> 1746 for job_id, done, content in self._prepare_split_single(
1747 gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
1748 ):
1749 if done:
1750 result = content

File ~/.local/lib/python3.8/site-packages/datasets/builder.py:1891, in ArrowBasedBuilder._prepare_split_single(self, gen_kwargs, fpath, file_format, max_shard_size, job_id)
1889 if isinstance(e, SchemaInferenceError) and e.context is not None:
1890 e = e.context
-> 1891 raise DatasetGenerationError("An error occurred while generating the dataset") from e
1893 yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)

DatasetGenerationError: An error occurred while generating the dataset
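
One way to narrow this down is to inspect the schemas of the Parquet shards that were already downloaded and see which files disagree with the rest. A rough sketch (the cache path and glob pattern below are assumptions; adjust them to wherever the files actually land under your cache_dir):

from pathlib import Path
import pyarrow.parquet as pq

# Assumed location of the downloaded Parquet shards; adjust to your cache layout.
cache_root = Path("/my/cache/dir/path")

reference = None
for parquet_file in sorted(cache_root.rglob("*.parquet")):
    columns = set(pq.read_schema(parquet_file).names)
    if reference is None:
        reference = columns  # the first file defines the expected column set
    elif columns != reference:
        # Report any file whose columns differ from the first file seen.
        print(parquet_file,
              "extra:", columns - reference,
              "missing:", reference - columns)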

amezasor changed discussion status to closed
amezasor changed discussion status to open
amezasor changed discussion title from Cast eError Loading dataset (bigcode/the-stack-dedup) to Casting Error Loading dataset (bigcode/the-stack-dedup)

I am having the same problem. Is there a solution?

There are two directories, "omgrofl" and "numpy", whose files contain an additional field "__id__". They have 4 and 7 rows, respectively.
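
If those two directories are the only offenders, one possible workaround (a sketch, not verified here) is to drop the stray "__id__" column from the affected Parquet files with pyarrow so their schema matches the other shards; the file path below is a placeholder:

import pyarrow.parquet as pq

# Placeholder path to one of the affected Parquet files (e.g. under the
# "omgrofl" or "numpy" directory); adjust to the real location on disk.
path = "data/omgrofl/the-stack-dedup-train-00000.parquet"

table = pq.read_table(path)
if "__id__" in table.column_names:
    # Remove the extra column and rewrite the file with the expected schema.
    table = table.drop(["__id__"])
    pq.write_table(table, path)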
