Dataset schema (column: type, observed range)
url: string (length 58 to 61)
repository_url: string (1 value)
labels_url: string (length 72 to 75)
comments_url: string (length 67 to 70)
events_url: string (length 65 to 68)
html_url: string (length 48 to 51)
id: int64 (600M to 2.19B)
node_id: string (length 18 to 24)
number: int64 (2 to 6.73k)
title: string (length 1 to 290)
user: dict
labels: list (length 0 to 4)
state: string (2 values)
locked: bool (1 class)
assignee: dict
assignees: list (length 0 to 4)
milestone: dict
comments: sequence (length 0 to 30)
created_at: timestamp[s]
updated_at: timestamp[s]
closed_at: timestamp[s]
author_association: string (3 values)
active_lock_reason: null
draft: null
pull_request: null
body: string (length 0 to 228k)
reactions: dict
timeline_url: string (length 67 to 70)
performed_via_github_app: null
state_reason: string (3 values)
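Each record below lists these fields, in this order, for one issue from the huggingface/datasets repository. As a minimal sketch of how such a dump can be loaded and its schema inspected (the repository id below is a placeholder, since the dump does not name one):

```python
from datasets import load_dataset

# "user/github-issues" is a hypothetical repo id standing in for this dump.
ds = load_dataset("user/github-issues", split="train")

print(ds.features)  # should mirror the column/type listing above
print(ds.num_rows)
print(ds[0]["title"], ds[0]["state"], ds[0]["state_reason"])
```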
https://api.github.com/repos/huggingface/datasets/issues/6073
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6073/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6073/comments
https://api.github.com/repos/huggingface/datasets/issues/6073/events
https://github.com/huggingface/datasets/issues/6073
1,822,167,804
I_kwDODunzps5snBL8
6,073
version 2.3.2: load_dataset() data_files can't include .xxxx in path
{ "login": "BUAAChuanWang", "id": 45893496, "node_id": "MDQ6VXNlcjQ1ODkzNDk2", "avatar_url": "https://avatars.githubusercontent.com/u/45893496?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BUAAChuanWang", "html_url": "https://github.com/BUAAChuanWang", "followers_url": "https://api.github.com/users/BUAAChuanWang/followers", "following_url": "https://api.github.com/users/BUAAChuanWang/following{/other_user}", "gists_url": "https://api.github.com/users/BUAAChuanWang/gists{/gist_id}", "starred_url": "https://api.github.com/users/BUAAChuanWang/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BUAAChuanWang/subscriptions", "organizations_url": "https://api.github.com/users/BUAAChuanWang/orgs", "repos_url": "https://api.github.com/users/BUAAChuanWang/repos", "events_url": "https://api.github.com/users/BUAAChuanWang/events{/privacy}", "received_events_url": "https://api.github.com/users/BUAAChuanWang/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Version 2.3.2 is over one year old, so please use the latest release (2.14.0) to get the expected behavior. Version 2.3.2 does not contain some fixes we made to fix resolving hidden files/directories (starting with a dot)." ]
2023-07-26T11:09:31
2023-08-29T15:53:59
2023-08-29T15:53:59
NONE
null
null
null
### Describe the bug First, I cd workdir. Then, I just use load_dataset("json", data_file={"train":"/a/b/c/.d/train/train.json", "test":"/a/b/c/.d/train/test.json"}) that couldn't work and <FileNotFoundError: Unable to find '/a/b/c/.d/train/train.jsonl' at /a/b/c/.d/> And I debug, it is fine in version2.1.2 So there maybe a bug in path join. Here is the whole bug report: /x/datasets/loa │ │ d.py:1656 in load_dataset │ │ │ │ 1653 │ ignore_verifications = ignore_verifications or save_infos │ │ 1654 │ │ │ 1655 │ # Create a dataset builder │ │ ❱ 1656 │ builder_instance = load_dataset_builder( │ │ 1657 │ │ path=path, │ │ 1658 │ │ name=name, │ │ 1659 │ │ data_dir=data_dir, │ │ │ │ x/datasets/loa │ │ d.py:1439 in load_dataset_builder │ │ │ │ 1436 │ if use_auth_token is not None: │ │ 1437 │ │ download_config = download_config.copy() if download_config e │ │ 1438 │ │ download_config.use_auth_token = use_auth_token │ │ ❱ 1439 │ dataset_module = dataset_module_factory( │ │ 1440 │ │ path, │ │ 1441 │ │ revision=revision, │ │ 1442 │ │ download_config=download_config, │ │ │ │ x/datasets/loa │ │ d.py:1097 in dataset_module_factory │ │ │ │ 1094 │ │ │ 1095 │ # Try packaged │ │ 1096 │ if path in _PACKAGED_DATASETS_MODULES: │ │ ❱ 1097 │ │ return PackagedDatasetModuleFactory( │ │ 1098 │ │ │ path, │ │ 1099 │ │ │ data_dir=data_dir, │ │ 1100 │ │ │ data_files=data_files, │ │ │ │x/datasets/loa │ │ d.py:743 in get_module │ │ │ │ 740 │ │ │ if self.data_dir is not None │ │ 741 │ │ │ else get_patterns_locally(str(Path().resolve())) │ │ 742 │ │ ) │ │ ❱ 743 │ │ data_files = DataFilesDict.from_local_or_remote( │ │ 744 │ │ │ patterns, │ │ 745 │ │ │ use_auth_token=self.download_config.use_auth_token, │ │ 746 │ │ │ base_path=str(Path(self.data_dir).resolve()) if self.data │ │ │ │ x/datasets/dat │ │ a_files.py:590 in from_local_or_remote │ │ │ │ 587 │ │ out = cls() │ │ 588 │ │ for key, patterns_for_key in patterns.items(): │ │ 589 │ │ │ out[key] = ( │ │ ❱ 590 │ │ │ │ DataFilesList.from_local_or_remote( │ │ 591 │ │ │ │ │ patterns_for_key, │ │ 592 │ │ │ │ │ base_path=base_path, │ │ 593 │ │ │ │ │ allowed_extensions=allowed_extensions, │ │ │ │ /x/datasets/dat │ │ a_files.py:558 in from_local_or_remote │ │ │ │ 555 │ │ use_auth_token: Optional[Union[bool, str]] = None, │ │ 556 │ ) -> "DataFilesList": │ │ 557 │ │ base_path = base_path if base_path is not None else str(Path() │ │ ❱ 558 │ │ data_files = resolve_patterns_locally_or_by_urls(base_path, pa │ │ 559 │ │ origin_metadata = _get_origin_metadata_locally_or_by_urls(data │ │ 560 │ │ return cls(data_files, origin_metadata) │ │ 561 │ │ │ │ /x/datasets/dat │ │ a_files.py:195 in resolve_patterns_locally_or_by_urls │ │ │ │ 192 │ │ if is_remote_url(pattern): │ │ 193 │ │ │ data_files.append(Url(pattern)) │ │ 194 │ │ else: │ │ ❱ 195 │ │ │ for path in _resolve_single_pattern_locally(base_path, pat │ │ 196 │ │ │ │ data_files.append(path) │ │ 197 │ │ │ 198 │ if not data_files: │ │ │ │ /x/datasets/dat │ │ a_files.py:145 in _resolve_single_pattern_locally │ │ │ │ 142 │ │ error_msg = f"Unable to find '{pattern}' at {Path(base_path).r │ │ 143 │ │ if allowed_extensions is not None: │ │ 144 │ │ │ error_msg += f" with any supported extension {list(allowed │ │ ❱ 145 │ │ raise FileNotFoundError(error_msg) │ │ 146 │ return sorted(out) │ │ 147 ### Steps to reproduce the bug 1. Version=2.3.2 2. In shell, cd workdir.(cd /a/b/c/.d/) 3. load_dataset("json", data_file={"train":"/a/b/c/.d/train/train.json", "test":"/a/b/c/.d/train/test.json"}) ### Expected behavior fix it please~ ### Environment info 2.3.2
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6073/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6073/timeline
null
completed
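The comment on the record above points to upgrading `datasets`; recent releases resolve hidden files and directories (path components starting with a dot) correctly. A minimal sketch of the corrected call, with the keyword spelled `data_files` (the report uses `data_file`) and the reporter's placeholder paths kept as-is:

```python
from datasets import load_dataset

# The /a/b/c/.d/... paths are the reporter's placeholders, not real files.
data_files = {
    "train": "/a/b/c/.d/train/train.json",
    "test": "/a/b/c/.d/train/test.json",
}
# On datasets >= 2.14.0 the hidden ".d" directory in an explicit path no longer
# breaks file resolution, per the maintainer's comment above.
ds = load_dataset("json", data_files=data_files)
```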
https://api.github.com/repos/huggingface/datasets/issues/6071
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6071/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6071/comments
https://api.github.com/repos/huggingface/datasets/issues/6071/events
https://github.com/huggingface/datasets/issues/6071
1,821,990,749
I_kwDODunzps5smV9d
6,071
storage_options provided to load_dataset not fully piping through since datasets 2.14.0
{ "login": "exs-avianello", "id": 128361578, "node_id": "U_kgDOB6akag", "avatar_url": "https://avatars.githubusercontent.com/u/128361578?v=4", "gravatar_id": "", "url": "https://api.github.com/users/exs-avianello", "html_url": "https://github.com/exs-avianello", "followers_url": "https://api.github.com/users/exs-avianello/followers", "following_url": "https://api.github.com/users/exs-avianello/following{/other_user}", "gists_url": "https://api.github.com/users/exs-avianello/gists{/gist_id}", "starred_url": "https://api.github.com/users/exs-avianello/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/exs-avianello/subscriptions", "organizations_url": "https://api.github.com/users/exs-avianello/orgs", "repos_url": "https://api.github.com/users/exs-avianello/repos", "events_url": "https://api.github.com/users/exs-avianello/events{/privacy}", "received_events_url": "https://api.github.com/users/exs-avianello/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi ! Thanks for reporting, I opened a PR to fix this\r\n\r\nWhat filesystem are you using ?", "Hi @lhoestq ! Thank you so much 🙌 \r\n\r\nIt's a bit of a custom setup, but in practice I am using a [pyarrow.fs.S3FileSystem](https://arrow.apache.org/docs/python/generated/pyarrow.fs.S3FileSystem.html) (wrapped in a `fsspec.implementations.arrow.ArrowFSWrapper` [to make it](https://arrow.apache.org/docs/python/filesystems.html#using-arrow-filesystems-with-fsspec) `fsspec` compatible). I also register it as an entrypoint with `fsspec` so that it's the one that gets automatically resolved when looking for filesystems for the `s3` protocol\r\n\r\nIn my case the `storage_option` that seemed not getting piped through was the filesystem's `endpoint_override` that I use in some tests to point at a mock S3 bucket" ]
2023-07-26T09:37:20
2023-07-27T12:42:58
2023-07-27T12:42:58
NONE
null
null
null
### Describe the bug Since the latest release of `datasets` (`2.14.0`), custom filesystem `storage_options` passed to `load_dataset()` do not seem to propagate through all the way - leading to problems if loading data files that need those options to be set. I think this is because of the new `_prepare_path_and_storage_options()` (https://github.com/huggingface/datasets/pull/6028), which returns the right `storage_options` to use given a path and a `DownloadConfig` - but which might not be taking into account the extra `storage_options` explicitly provided e.g. through `load_dataset()` ### Steps to reproduce the bug ```python import fsspec import pandas as pd import datasets # Generate mock parquet file data_files = "demo.parquet" pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}).to_parquet(data_files) _storage_options = {"x": 1, "y": 2} fs = fsspec.filesystem("file", **_storage_options) dataset = datasets.load_dataset( "parquet", data_files=data_files, storage_options=fs.storage_options ) ``` Looking at the `storage_options` resolved here: https://github.com/huggingface/datasets/blob/b0177910b32712f28d147879395e511207e39958/src/datasets/data_files.py#L331 they end up being `{}`, instead of propagating through the `storage_options` that were provided to `load_dataset` (`fs.storage_options`). As these then get used for the filesystem operation a few lines below https://github.com/huggingface/datasets/blob/b0177910b32712f28d147879395e511207e39958/src/datasets/data_files.py#L339 the call will fail if the user-provided `storage_options` were needed. --- A temporary workaround that seemed to work locally to bypass the problem was to bundle a duplicate of the `storage_options` into the `download_config`, so that they make their way all the way to `_prepare_path_and_storage_options()` and get extracted correctly: ```python dataset = datasets.load_dataset( "parquet", data_files=data_files, storage_options=fs.storage_options, download_config=datasets.DownloadConfig(storage_options={fs.protocol: fs.storage_options}), ) ``` ### Expected behavior `storage_options` provided to `load_dataset` take effect in all backend filesystem operations. ### Environment info datasets==2.14.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6071/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6071/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/6069
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6069/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6069/comments
https://api.github.com/repos/huggingface/datasets/issues/6069/events
https://github.com/huggingface/datasets/issues/6069
1,820,831,535
I_kwDODunzps5sh68v
6,069
KeyError: dataset has no key "image"
{ "login": "etetteh", "id": 28512232, "node_id": "MDQ6VXNlcjI4NTEyMjMy", "avatar_url": "https://avatars.githubusercontent.com/u/28512232?v=4", "gravatar_id": "", "url": "https://api.github.com/users/etetteh", "html_url": "https://github.com/etetteh", "followers_url": "https://api.github.com/users/etetteh/followers", "following_url": "https://api.github.com/users/etetteh/following{/other_user}", "gists_url": "https://api.github.com/users/etetteh/gists{/gist_id}", "starred_url": "https://api.github.com/users/etetteh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/etetteh/subscriptions", "organizations_url": "https://api.github.com/users/etetteh/orgs", "repos_url": "https://api.github.com/users/etetteh/repos", "events_url": "https://api.github.com/users/etetteh/events{/privacy}", "received_events_url": "https://api.github.com/users/etetteh/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "You can list the dataset's columns with `ds.column_names` before `.map` to check whether the dataset has an `image` column. If it doesn't, then this is a bug. Otherwise, please paste the line with the `.map` call.\r\n\r\n\r\n", "This is the piece of code I am running:\r\n```\r\ndata_transforms = utils.get_data_augmentation(args)\r\nimage_dataset = utils.load_image_dataset(args.dataset)\r\n\r\ndef resize(examples):\r\n examples[\"pixel_values\"] = [image.convert(\"RGB\").resize((300, 300)) for image in examples[\"image\"]]\r\n return examples\r\n\r\ndef preprocess_train(example_batch):\r\n print(f\"Example batch: \\n{example_batch}\")\r\n example_batch[\"pixel_values\"] = [\r\n data_transforms[\"train\"](image.convert(\"RGB\")) for image in example_batch[\"pixel_values\"]\r\n ]\r\n return example_batch\r\n\r\ndef preprocess_val(example_batch):\r\n example_batch[\"pixel_values\"] = [\r\n data_transforms[\"val\"](image.convert(\"RGB\")) for image in example_batch[\"pixel_values\"]\r\n ]\r\n return example_batch\r\n\r\nimage_dataset = image_dataset.map(resize, remove_columns=[\"image\"], batched=True)\r\n\r\nimage_dataset[\"train\"].set_transform(preprocess_train)\r\nimage_dataset[\"validation\"].set_transform(preprocess_val)\r\n```\r\n\r\nWhen I print ds.column_names I get the following\r\n`{'train': ['image', 'label'], 'validation': ['image', 'label'], 'test': ['image', 'label']}`\r\n\r\nThe `print(f\"Example batch: \\n{example_batch}\")` in the `preprocess_train` function outputs only labels without images:\r\n```\r\nExample batch: \r\n{'label': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]}\r\n```\r\n\r\nThe weird part of it all is that a sample code runs in a jupyter lab notebook without any bugs, but when I run my scripts from the terminal I get the bug. The same code.", "The `remove_columns=[\"image\"]` argument in the `.map` call removes the `image` column from the output, so drop this argument to preserve it.", "The problem is not with the removal of the image key. The bug is why only the labels are sent to be process, instead of all the featues or dictionary keys.\r\n\r\nP.S. I just dropped the removal argument as you've suggested, but that didn't solve the problem, because only the labels are being sent to be processed", "All the `image_dataset.column_names` after the `map` call should also be present in `preprocess_train `/`preprocess_val` unless (input) `columns` in `set_transform` are specified.\r\n\r\nIf that's not the case, we need a full reproducer (not snippets) with the environment info.", "I have resolved the error after including a collate function as indicated in the Quick Start session of the Datasets docs.:\r\n\r\nHere is what I did:\r\n```\r\ndata_transforms = utils.get_data_augmentation(args)\r\nimage_dataset = utils.load_image_dataset(args.dataset)\r\n\r\ndef preprocess_train(example_batch):\r\n example_batch[\"pixel_values\"] = [\r\n data_transforms[\"train\"](image.convert(\"RGB\")) for image in example_batch[\"image\"]\r\n ]\r\n return example_batch\r\n\r\ndef preprocess_val(example_batch):\r\n example_batch[\"pixel_values\"] = [\r\n data_transforms[\"val\"](image.convert(\"RGB\")) for image in example_batch[\"image\"]\r\n ]\r\n return example_batch\r\n\r\ndef collate_fn(examples):\r\n images = []\r\n labels = []\r\n for example in examples:\r\n images.append((example[\"pixel_values\"]))\r\n labels.append(example[\"label\"])\r\n\r\n pixel_values = torch.stack(images)\r\n labels = torch.tensor(labels)\r\n return {\"pixel_values\": pixel_values, \"label\": labels}\r\n\r\ntrain_dataset = image_dataset[\"train\"].with_transform(preprocess_train)\r\nval_dataset = image_dataset[\"validation\"].with_transform(preprocess_val)\r\n\r\nimage_datasets = {\r\n \"train\": train_dataset,\r\n \"val\": val_dataset\r\n}\r\n\r\nsamplers = {\r\n \"train\": data.RandomSampler(train_dataset),\r\n \"val\": data.SequentialSampler(val_dataset),\r\n}\r\n\r\ndataloaders = {\r\n x: data.DataLoader(\r\n image_datasets[x],\r\n collate_fn=collate_fn,\r\n batch_size=batch_size,\r\n sampler=samplers[x],\r\n num_workers=args.num_workers,\r\n worker_init_fn=utils.set_seed_for_worker,\r\n generator=g,\r\n pin_memory=True,\r\n )\r\n for x in [\"train\", \"val\"]\r\n}\r\n\r\ntrain_loader, val_loader = dataloaders[\"train\"], dataloaders[\"val\"]\r\n```\r\nEverything runs fine without any bug now. " ]
2023-07-25T17:45:50
2023-07-27T12:42:17
2023-07-27T12:42:17
NONE
null
null
null
### Describe the bug I've loaded a local image dataset with: `ds = load_dataset("imagefolder", data_dir=path-to-data)` and defined a transform to process the data, following the Datasets docs. However, I get a KeyError indicating there's no "image" key in my dataset. When I printed out the example_batch sent to the transformation function, it shows that only the labels are being sent to the function. For some reason, the images are not in the example batches. ### Steps to reproduce the bug I'm using the latest stable version of datasets ### Expected behavior I expect the example_batches to contain both images and labels ### Environment info I'm using the latest stable version of datasets
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6069/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6069/timeline
null
completed
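Condensing the resolution from the thread above into a short sketch: keep the `image` column (do not pass it to `remove_columns` in `.map`) and attach the transform lazily with `with_transform`, so the batch handed to the transform contains every column. The imagefolder path below is a placeholder:

```python
from datasets import load_dataset

ds = load_dataset("imagefolder", data_dir="path/to/images")  # placeholder path
print(ds["train"].column_names)  # expect ['image', 'label']

def preprocess_train(batch):
    # "image" is still present because nothing removed it beforehand
    batch["pixel_values"] = [img.convert("RGB").resize((300, 300)) for img in batch["image"]]
    return batch

train_ds = ds["train"].with_transform(preprocess_train)
print(train_ds[0].keys())  # image, label and pixel_values are all available
```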
https://api.github.com/repos/huggingface/datasets/issues/6066
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6066/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6066/comments
https://api.github.com/repos/huggingface/datasets/issues/6066/events
https://github.com/huggingface/datasets/issues/6066
1,819,717,542
I_kwDODunzps5sdq-m
6,066
AttributeError: '_tqdm_cls' object has no attribute '_lock'
{ "login": "codingl2k1", "id": 138426806, "node_id": "U_kgDOCEA5tg", "avatar_url": "https://avatars.githubusercontent.com/u/138426806?v=4", "gravatar_id": "", "url": "https://api.github.com/users/codingl2k1", "html_url": "https://github.com/codingl2k1", "followers_url": "https://api.github.com/users/codingl2k1/followers", "following_url": "https://api.github.com/users/codingl2k1/following{/other_user}", "gists_url": "https://api.github.com/users/codingl2k1/gists{/gist_id}", "starred_url": "https://api.github.com/users/codingl2k1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/codingl2k1/subscriptions", "organizations_url": "https://api.github.com/users/codingl2k1/orgs", "repos_url": "https://api.github.com/users/codingl2k1/repos", "events_url": "https://api.github.com/users/codingl2k1/events{/privacy}", "received_events_url": "https://api.github.com/users/codingl2k1/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi ! I opened https://github.com/huggingface/datasets/pull/6067 to add the missing `_lock`\r\n\r\nWe'll do a patch release soon, but feel free to install `datasets` from source in the meantime", "I have tested the latest main, it does not work.\r\n\r\nI add more logs to reproduce this issue, it looks like a multi threading bug:\r\n\r\n```python\r\n@contextmanager\r\ndef ensure_lock(tqdm_class, lock_name=\"\"):\r\n \"\"\"get (create if necessary) and then restore `tqdm_class`'s lock\"\"\"\r\n import os\r\n import threading\r\n print(os.getpid(), threading.get_ident(), \"ensure_lock\", tqdm_class, lock_name)\r\n old_lock = getattr(tqdm_class, '_lock', None) # don't create a new lock\r\n lock = old_lock or tqdm_class.get_lock() # maybe create a new lock\r\n lock = getattr(lock, lock_name, lock) # maybe subtype\r\n tqdm_class.set_lock(lock)\r\n print(os.getpid(), threading.get_ident(), \"set_lock\")\r\n yield lock\r\n if old_lock is None:\r\n print(os.getpid(), threading.get_ident(), \"del tqdm_class\")\r\n del tqdm_class._lock\r\n else:\r\n tqdm_class.set_lock(old_lock)\r\n```\r\noutput\r\n```\r\n64943 8424758784 ensure_lock <datasets.utils.logging._tqdm_cls object at 0x2aa7fb250> \r\n64943 8424758784 set_lock\r\n64943 8424758784 del tqdm_class\r\n64943 8424758784 ensure_lock <datasets.utils.logging._tqdm_cls object at 0x2aa7fb250> \r\n64943 8424758784 set_lock\r\n64943 8424758784 del tqdm_class\r\n64943 11638370304 ensure_lock <datasets.utils.logging._tqdm_cls object at 0x2aa7fb250> \r\n64943 11638370304 set_lock\r\n64943 11568967680 ensure_lock <datasets.utils.logging._tqdm_cls object at 0x2aa7fb250> \r\n64943 11568967680 set_lock\r\n64943 11638370304 del tqdm_class\r\n64943 11638370304 ensure_lock <datasets.utils.logging._tqdm_cls object at 0x2aa7fb250> \r\n64943 11638370304 set_lock\r\n64943 11638370304 del tqdm_class\r\n64943 11568967680 del tqdm_class\r\n```\r\n\r\nThread `11638370304` del the _lock from tqdm_class first, then thread `11568967680` del _lock failed.", "Maybe it is a bug of tqdm? I think simply use `try ... except AttributeError ...` wraps `del tqdm_class._lock` should work.", "Yes it looks like a bug on their end indeed, do you want to open a PR on tqdm ?\r\n\r\nLet me see if I can find a workaround in the meantime", "I opened https://github.com/huggingface/datasets/pull/6068 if you want to try it out", "> I opened #6068 if you want to try it out\r\n\r\nThis fix works! Thanks.", "Awesome ! closing this then :)\r\nWe'll do a patch release today or tomorrow" ]
2023-07-25T07:24:36
2023-07-26T10:56:25
2023-07-26T10:56:24
NONE
null
null
null
### Describe the bug ```python File "/Users/codingl2k1/.pyenv/versions/3.11.4/lib/python3.11/site-packages/datasets/load.py", line 1034, in get_module data_files = DataFilesDict.from_patterns( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/.pyenv/versions/3.11.4/lib/python3.11/site-packages/datasets/data_files.py", line 671, in from_patterns DataFilesList.from_patterns( File "/Users/codingl2k1/.pyenv/versions/3.11.4/lib/python3.11/site-packages/datasets/data_files.py", line 586, in from_patterns origin_metadata = _get_origin_metadata(data_files, download_config=download_config) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/.pyenv/versions/3.11.4/lib/python3.11/site-packages/datasets/data_files.py", line 502, in _get_origin_metadata return thread_map( ^^^^^^^^^^^ File "/Users/codingl2k1/.pyenv/versions/3.11.4/lib/python3.11/site-packages/tqdm/contrib/concurrent.py", line 70, in thread_map return _executor_map(ThreadPoolExecutor, fn, *iterables, **tqdm_kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/.pyenv/versions/3.11.4/lib/python3.11/site-packages/tqdm/contrib/concurrent.py", line 48, in _executor_map with ensure_lock(tqdm_class, lock_name=lock_name) as lk: File "/Users/codingl2k1/.pyenv/versions/3.11.4/lib/python3.11/contextlib.py", line 144, in __exit__ next(self.gen) File "/Users/codingl2k1/.pyenv/versions/3.11.4/lib/python3.11/site-packages/tqdm/contrib/concurrent.py", line 25, in ensure_lock del tqdm_class._lock ^^^^^^^^^^^^^^^^ AttributeError: '_tqdm_cls' object has no attribute '_lock' ``` ### Steps to reproduce the bug Happens ocasionally. ### Expected behavior I added a print in tqdm `ensure_lock()`, got a `ensure_lock <datasets.utils.logging._tqdm_cls object at 0x16dddead0> ` print. According to the code in https://github.com/tqdm/tqdm/blob/master/tqdm/contrib/concurrent.py#L24 ```python @contextmanager def ensure_lock(tqdm_class, lock_name=""): """get (create if necessary) and then restore `tqdm_class`'s lock""" print("ensure_lock", tqdm_class, lock_name) old_lock = getattr(tqdm_class, '_lock', None) # don't create a new lock lock = old_lock or tqdm_class.get_lock() # maybe create a new lock lock = getattr(lock, lock_name, lock) # maybe subtype tqdm_class.set_lock(lock) yield lock if old_lock is None: del tqdm_class._lock # <-- It tries to del the `_lock` attribute from tqdm_class. else: tqdm_class.set_lock(old_lock) ``` But, huggingface datasets `datasets.utils.logging._tqdm_cls` does not have the field `_lock`: https://github.com/huggingface/datasets/blob/main/src/datasets/utils/logging.py#L205 ```python class _tqdm_cls: def __call__(self, *args, disable=False, **kwargs): if _tqdm_active and not disable: return tqdm_lib.tqdm(*args, **kwargs) else: return EmptyTqdm(*args, **kwargs) def set_lock(self, *args, **kwargs): self._lock = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*args, **kwargs) def get_lock(self): if _tqdm_active: return tqdm_lib.tqdm.get_lock() ``` ### Environment info Python 3.11.4 tqdm '4.65.0' datasets master
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6066/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6066/timeline
null
completed
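For clarity, the guard the reporter proposes upstream can be sketched directly against the `ensure_lock` helper quoted in the report; this is only the reporter's suggested direction, not the workaround that actually shipped in #6068:

```python
from contextlib import contextmanager

@contextmanager
def ensure_lock(tqdm_class, lock_name=""):
    """Get (create if necessary) and then restore `tqdm_class`'s lock."""
    old_lock = getattr(tqdm_class, "_lock", None)  # don't create a new lock
    lock = old_lock or tqdm_class.get_lock()       # maybe create a new lock
    lock = getattr(lock, lock_name, lock)          # maybe subtype
    tqdm_class.set_lock(lock)
    yield lock
    if old_lock is None:
        try:
            del tqdm_class._lock
        except AttributeError:
            pass  # another thread already deleted it, as in the trace above
    else:
        tqdm_class.set_lock(old_lock)
```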
https://api.github.com/repos/huggingface/datasets/issues/6060
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6060/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6060/comments
https://api.github.com/repos/huggingface/datasets/issues/6060/events
https://github.com/huggingface/datasets/issues/6060
1,816,614,120
I_kwDODunzps5sR1To
6,060
Dataset.map() execute twice when in PyTorch DDP mode
{ "login": "wanghaoyucn", "id": 39429965, "node_id": "MDQ6VXNlcjM5NDI5OTY1", "avatar_url": "https://avatars.githubusercontent.com/u/39429965?v=4", "gravatar_id": "", "url": "https://api.github.com/users/wanghaoyucn", "html_url": "https://github.com/wanghaoyucn", "followers_url": "https://api.github.com/users/wanghaoyucn/followers", "following_url": "https://api.github.com/users/wanghaoyucn/following{/other_user}", "gists_url": "https://api.github.com/users/wanghaoyucn/gists{/gist_id}", "starred_url": "https://api.github.com/users/wanghaoyucn/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/wanghaoyucn/subscriptions", "organizations_url": "https://api.github.com/users/wanghaoyucn/orgs", "repos_url": "https://api.github.com/users/wanghaoyucn/repos", "events_url": "https://api.github.com/users/wanghaoyucn/events{/privacy}", "received_events_url": "https://api.github.com/users/wanghaoyucn/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Sorry for asking a duplicate question about `num_proc`, I searched the forum and find the solution.\r\n\r\nBut I still can't make the trick with `torch.distributed.barrier()` to only map at the main process work. The [post on forum]( https://discuss.huggingface.co/t/slow-processing-with-map-when-using-deepspeed-or-fairscale/7229/7) didn't help.", "If it does the `map` twice then it means the hash of your map function is not some same between your two processes.\r\n\r\nCan you make sure your map functions have the same hash in different processes ?\r\n\r\n```python\r\nfrom datasets.fingerprint import Hasher\r\n\r\nprint(Hasher.hash(lambda x: cut_reorder_keys(x, num_stations_list=args.num_stations_list, is_pad=True, is_train=True)))\r\nprint(Hasher.hash(lambda x: random_shift(x, shift_range=(-160, 0), feature_scale=16)))\r\n```\r\n\r\nYou can also set the fingerprint used to reload the resulting dataset by passing `new_finegrprint=` in `map`, see https://huggingface.co/docs/datasets/v2.13.1/en/about_cache#the-cache. This will force the different processes to use the same fingerprint used to locate the resulting dataset in the cache.", "Thanks for help! I find the fingerprint between processes don't have same hash:\r\n```\r\nRank 0: Gpu 0 cut_reorder_keys fingerprint c7f47f40e9a67657\r\nRank 0: Gpu 0 random_shift fingerprint 240a0ce79831e7d4\r\n\r\nRank 1: Gpu 1 cut_reorder_keys fingerprint 20edd3d9cf284001\r\nRank 1: Gpu 1 random_shift fingerprint 819f7c1c18e7733f\r\n```\r\nBut my functions only process the example one by one and don't need rank or other arguments. After all it can work in the test for dataset and dataloader.\r\nI'll try to set `new_fingerprint` to see if it works and figure out the reason of different hash.", "I finally figure it out. The fingerprint of the function will change if other key-value pairs change in `args` even the `args.num_stations_list` is not changed.\r\n\r\n```python\r\nlambda x: cut_reorder_keys(x, num_stations_list=args.num_stations_list, is_pad=True, is_train=True)\r\n```\r\n\r\nMy `args` contains the key `rank` which refers the rank of its GPU, so the fingerprints change among the GPUs.\r\nI use `partial` in `functools` to generate a partial function that fixs the argument `num_stations_list=args.num_stations_list`, and the fingerprint of this partial function keeps among the GPUs. Finally I can reuse the mapped cache." ]
2023-07-22T05:06:43
2024-01-22T18:35:12
2024-01-22T18:35:12
NONE
null
null
null
### Describe the bug I use `torchrun --standalone --nproc_per_node=2 train.py` to start training. And write the code following the [docs](https://huggingface.co/docs/datasets/process#distributed-usage). The trick about using `torch.distributed.barrier()` to only execute map at the main process doesn't always work. When I am training model, it will map twice. When I am running a test for dataset and dataloader (just print the batches), it can work. Their code about loading dataset are same. And on another server with 30 CPU cores, I use 2 GPUs and it can't work neither. I have tried to use `rank` and `local_rank` to check, they all didn't make sense. ### Steps to reproduce the bug use `torchrun --standalone --nproc_per_node=2 train.py` or `torchrun --standalone train.py` to run This is my code: ```python if args.distributed and world_size > 1: if args.local_rank > 0: print(f"Rank {args.rank}: Gpu {args.gpu} waiting for main process to perform the mapping", force=True) torch.distributed.barrier() print("Mapping dataset") dataset = dataset.map(lambda x: cut_reorder_keys(x, num_stations_list=args.num_stations_list, is_pad=True, is_train=True), num_proc=8, desc="cut_reorder_keys") dataset = dataset.map(lambda x: random_shift(x, shift_range=(-160, 0), feature_scale=16), num_proc=8, desc="random_shift") dataset_test = dataset_test.map(lambda x: cut_reorder_keys(x, num_stations_list=args.num_stations_list, is_pad=True, is_train=False), num_proc=8, desc="cut_reorder_keys") if args.local_rank == 0: print("Mapping finished, loading results from main process") torch.distributed.barrier() ``` ### Expected behavior Only the main process will execute `map`, while the sub process will load cache from disk. ### Environment info server with 64 CPU cores (AMD Ryzen Threadripper PRO 5995WX 64-Cores) and 2 RTX 4090 - `python==3.9.16` - `datasets==2.13.1` - `torch==2.0.1+cu117` - `22.04.1-Ubuntu` server with 30 CPU cores (Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz) and 2 RTX 4090 - `python==3.9.0` - `datasets==2.13.1` - `torch==2.0.1+cu117` - `Ubuntu 20.04`
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6060/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6060/timeline
null
completed
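The resolution in the last comment above (fix the arguments with `functools.partial` so the map fingerprint no longer depends on per-rank state) can be condensed into a self-contained sketch; `cut_reorder_keys` and `Args` are placeholders for the reporter's preprocessing function and argparse namespace:

```python
from functools import partial
from datasets.fingerprint import Hasher

def cut_reorder_keys(example, num_stations_list, is_pad, is_train):
    # placeholder standing in for the reporter's real per-example preprocessing
    return example

class Args:
    num_stations_list = [8, 16]
    rank = 0  # differs in every DDP process

args = Args()

# A lambda that closes over `args` is hashed together with everything on `args`,
# including `rank`, so each process computes a different fingerprint and re-runs .map().
print(Hasher.hash(lambda x: cut_reorder_keys(
    x, num_stations_list=args.num_stations_list, is_pad=True, is_train=True)))

# A partial that captures only the values that matter hashes identically on every
# rank, so non-main processes can reuse the cached result.
stable_fn = partial(cut_reorder_keys, num_stations_list=args.num_stations_list,
                    is_pad=True, is_train=True)
print(Hasher.hash(stable_fn))
```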
https://api.github.com/repos/huggingface/datasets/issues/6059
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6059/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6059/comments
https://api.github.com/repos/huggingface/datasets/issues/6059/events
https://github.com/huggingface/datasets/issues/6059
1,816,537,176
I_kwDODunzps5sRihY
6,059
Provide ability to load label mappings from file
{ "login": "david-waterworth", "id": 5028974, "node_id": "MDQ6VXNlcjUwMjg5NzQ=", "avatar_url": "https://avatars.githubusercontent.com/u/5028974?v=4", "gravatar_id": "", "url": "https://api.github.com/users/david-waterworth", "html_url": "https://github.com/david-waterworth", "followers_url": "https://api.github.com/users/david-waterworth/followers", "following_url": "https://api.github.com/users/david-waterworth/following{/other_user}", "gists_url": "https://api.github.com/users/david-waterworth/gists{/gist_id}", "starred_url": "https://api.github.com/users/david-waterworth/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/david-waterworth/subscriptions", "organizations_url": "https://api.github.com/users/david-waterworth/orgs", "repos_url": "https://api.github.com/users/david-waterworth/repos", "events_url": "https://api.github.com/users/david-waterworth/events{/privacy}", "received_events_url": "https://api.github.com/users/david-waterworth/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[]
2023-07-22T02:04:19
2023-07-22T02:04:19
null
NONE
null
null
null
### Feature request My task is classification of a dataset containing a large label set that includes a hierarchy. Even ignoring the hierarchy I'm not able to find an example using `datasets` where the label names aren't hard-coded. This works find for classification of a handful of labels but ideally there would be a way of loading the name/id mappings required for `datasets.features.ClassLabel` from a file. It is possible to pass a file to ClassLabel but I cannot see an easy way of using this with `GeneratorBasedBuilder` since `self._info` is called before the `dl_manager` is constructed so even if my dataset contains say `label_mappings.json` there's no way of loading it in order to construct the `datasets.DatasetInfo` I can see other uses to accessing the `download_manager` from `self._info` - i.e. if the files contain a schema (i.e. `arrow` or `parquet` files) the `datasets.DatasetInfo` could be inferred. The workaround that was suggested in the forum is to generate a `.py` file from the `label_mappings.json` and import it. ``` class TestDatasetBuilder(datasets.GeneratorBasedBuilder): VERSION = datasets.Version("1.0.0") def _info(self): return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features( { "text": datasets.Value("string"), "label": datasets.features.ClassLabel(names=["label_1", "label_2"]), } ), task_templates=[TextClassification(text_column="text", label_column="label")], ) def _split_generators(self, dl_manager): train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL) test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL) return [ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}), datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}), ] def _generate_examples(self, filepath): """Generate AG News examples.""" with open(filepath, encoding="utf-8") as csv_file: csv_reader = csv.DictReader(csv_file) for id_, row in enumerate(csv_reader): yield id_, row ``` ### Motivation Allow `datasets.DatasetInfo` to be generated based on the contents of the dataset. ### Your contribution I'm willing to work on a PR with guidence.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6059/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6059/timeline
null
null
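As a sketch of a workaround for the request above: if the label names live in a small JSON file shipped next to the builder script (rather than in a file that needs the download manager), `_info()` can read it directly and feed the names to `ClassLabel`. The file name and its assumed structure (a flat list of label strings) follow the reporter's example and are not an existing datasets API:

```python
import json
import datasets

# Assumption: label_mappings.json sits next to the builder script and holds a
# flat JSON list of label names, e.g. ["label_1", "label_2", ...].
with open("label_mappings.json", encoding="utf-8") as f:
    label_names = json.load(f)

features = datasets.Features(
    {
        "text": datasets.Value("string"),
        "label": datasets.features.ClassLabel(names=label_names),
    }
)
```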
https://api.github.com/repos/huggingface/datasets/issues/6058
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6058/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6058/comments
https://api.github.com/repos/huggingface/datasets/issues/6058/events
https://github.com/huggingface/datasets/issues/6058
1,815,131,397
I_kwDODunzps5sMLUF
6,058
laion-coco download error
{ "login": "yangyijune", "id": 54424110, "node_id": "MDQ6VXNlcjU0NDI0MTEw", "avatar_url": "https://avatars.githubusercontent.com/u/54424110?v=4", "gravatar_id": "", "url": "https://api.github.com/users/yangyijune", "html_url": "https://github.com/yangyijune", "followers_url": "https://api.github.com/users/yangyijune/followers", "following_url": "https://api.github.com/users/yangyijune/following{/other_user}", "gists_url": "https://api.github.com/users/yangyijune/gists{/gist_id}", "starred_url": "https://api.github.com/users/yangyijune/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yangyijune/subscriptions", "organizations_url": "https://api.github.com/users/yangyijune/orgs", "repos_url": "https://api.github.com/users/yangyijune/repos", "events_url": "https://api.github.com/users/yangyijune/events{/privacy}", "received_events_url": "https://api.github.com/users/yangyijune/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "This can also mean one of the files was not downloaded correctly.\r\n\r\nWe log an erroneous file's name before raising the reader's error, so this is how you can find the problematic file. Then, you should delete it and call `load_dataset` again.\r\n\r\n(I checked all the uploaded files, and they seem to be valid Parquet files, so I don't think this is a bug on their side)\r\n" ]
2023-07-21T04:24:15
2023-07-22T01:42:06
2023-07-22T01:42:06
NONE
null
null
null
### Describe the bug The full trace: ``` /home/bian/anaconda3/envs/sd/lib/python3.10/site-packages/datasets/load.py:1744: FutureWarning: 'ignore_verifications' was de precated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0. You can remove this warning by passing 'verification_mode=no_checks' instead. warnings.warn( Downloading and preparing dataset parquet/laion--laion-coco to /home/bian/.cache/huggingface/datasets/laion___parquet/laion-- laion-coco-cb4205d7f1863066/0.0.0/bcacc8bdaa0614a5d73d0344c813275e590940c6ea8bc569da462847103a1afd... Downloading data: 100%|█| 1.89G/1.89G [04:57<00:00, Downloading data files: 100%|█| 1/1 [04:59<00:00, 2 Extracting data files: 100%|█| 1/1 [00:00<00:00, 13 Generating train split: 0 examples [00:00, ? examples/s]<_io.BufferedReader name='/home/bian/.cache/huggingface/datasets/downlo ads/26d7a016d25bbd9443115cfa3092136e8eb2f1f5bcd4154 0cb9234572927f04c'> Traceback (most recent call last): File "/home/bian/data/ZOC/download_laion_coco.py", line 4, in <module> dataset = load_dataset("laion/laion-coco", ignore_verifications=True) File "/home/bian/anaconda3/envs/sd/lib/python3.10/site-packages/datasets/load.py", line 1791, in load_dataset builder_instance.download_and_prepare( File "/home/bian/anaconda3/envs/sd/lib/python3.10/site-packages/datasets/builder.py", line 891, in download_and_prepare self._download_and_prepare( File "/home/bian/anaconda3/envs/sd/lib/python3.10/site-packages/datasets/builder.py", line 986, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/home/bian/anaconda3/envs/sd/lib/python3.10/site-packages/datasets/builder.py", line 1748, in _prepare_split for job_id, done, content in self._prepare_split_single( File "/home/bian/anaconda3/envs/sd/lib/python3.10/site-packages/datasets/builder.py", line 1842, in _prepare_split_single generator = self._generate_tables(**gen_kwargs) File "/home/bian/anaconda3/envs/sd/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py", line 67, in _generate_tables parquet_file = pq.ParquetFile(f) File "/home/bian/anaconda3/envs/sd/lib/python3.10/site-packages/pyarrow/parquet/core.py", line 323, in __init__ self.reader.open( File "pyarrow/_parquet.pyx", line 1227, in pyarrow._parquet.ParquetReader.open File "pyarrow/error.pxi", line 100, in pyarrow.lib.check_status pyarrow.lib.ArrowInvalid: Parquet magic bytes not found in footer. Either the file is corrupted or this is not a parquet file . ``` I have carefully followed the instructions in #5264 but still get the same error. Other helpful information: ``` ds = load_dataset("parquet", data_files= ...: "https://huggingface.co/datasets/laion/l ...: aion-coco/resolve/d22869de3ccd39dfec1507 ...: f7ded32e4a518dad24/part-00000-2256f782-1 ...: 26f-4dc6-b9c6-e6757637749d-c000.snappy.p ...: arquet") Found cached dataset parquet (/home/bian/.cache/huggingface/datasets/parquet/default-a02eea00aeb08b0e/0.0.0/bb8ccf89d9ee38581ff5e51506d721a9b37f14df8090dc9b2d8fb4a40957833f) 100%|██████████████| 1/1 [00:00<00:00, 4.55it/s] ``` ### Steps to reproduce the bug ``` from datasets import load_dataset dataset = load_dataset("laion/laion-coco", ignore_verifications=True/False) ``` ### Expected behavior Properly load Laion-coco dataset ### Environment info datasets==2.11.0 torch==1.12.1 python 3.10
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6058/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6058/timeline
null
completed
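The comment above suggests finding the incompletely downloaded file, deleting it, and calling `load_dataset` again. A small sketch of checking the cached downloads for a valid Parquet footer; the cache location is the default one from the reporter's traceback and may differ on other machines:

```python
import glob
import os
import pyarrow.parquet as pq

cache_downloads = os.path.expanduser("~/.cache/huggingface/datasets/downloads")
for path in glob.glob(os.path.join(cache_downloads, "*")):
    if not os.path.isfile(path) or path.endswith((".json", ".lock")):
        continue
    try:
        pq.ParquetFile(path)  # raises ArrowInvalid if the footer is missing or corrupted
    except Exception as err:
        print(f"possibly corrupted download: {path} ({err})")
        # os.remove(path)  # then re-run load_dataset, per the comment above
```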
https://api.github.com/repos/huggingface/datasets/issues/6057
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6057/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6057/comments
https://api.github.com/repos/huggingface/datasets/issues/6057/events
https://github.com/huggingface/datasets/issues/6057
1,815,100,151
I_kwDODunzps5sMDr3
6,057
Why is the speed difference of gen example so big?
{ "login": "pixeli99", "id": 46072190, "node_id": "MDQ6VXNlcjQ2MDcyMTkw", "avatar_url": "https://avatars.githubusercontent.com/u/46072190?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pixeli99", "html_url": "https://github.com/pixeli99", "followers_url": "https://api.github.com/users/pixeli99/followers", "following_url": "https://api.github.com/users/pixeli99/following{/other_user}", "gists_url": "https://api.github.com/users/pixeli99/gists{/gist_id}", "starred_url": "https://api.github.com/users/pixeli99/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pixeli99/subscriptions", "organizations_url": "https://api.github.com/users/pixeli99/orgs", "repos_url": "https://api.github.com/users/pixeli99/repos", "events_url": "https://api.github.com/users/pixeli99/events{/privacy}", "received_events_url": "https://api.github.com/users/pixeli99/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi!\r\n\r\nIt's hard to explain this behavior without more information. Can you profile the slower version with the following code\r\n```python\r\nimport cProfile, pstats\r\nfrom datasets import load_dataset\r\n\r\nwith cProfile.Profile() as profiler:\r\n ds = load_dataset(...)\r\n\r\nstats = pstats.Stats(profiler).sort_stats(\"cumtime\")\r\nstats.print_stats()\r\n```\r\nand share the output?" ]
2023-07-21T03:34:49
2023-10-04T18:06:16
2023-10-04T18:06:15
NONE
null
null
null
```python def _generate_examples(self, metadata_path, images_dir, conditioning_images_dir): with open(metadata_path, 'r') as file: metadata = json.load(file) for idx, item in enumerate(metadata): image_path = item.get('image_path') text_content = item.get('text_content') image_data = open(image_path, "rb").read() yield idx, { "text": text_content, "image": { "path": image_path, "bytes": image_data, }, "conditioning_image": { "path": image_path, "bytes": image_data, }, } ``` Hello, I use the above function to deal with my local data set, but I am very surprised that the speed at which I generate example is very different. When I start a training task, **sometimes 1000examples/s, sometimes only 10examples/s.** ![image](https://github.com/huggingface/datasets/assets/46072190/cdc17661-8267-4fd8-b30c-b74d505efd9b) I'm not saying that speed is changing all the time. I mean, the reading speed is different in different training, which will cause me to start training over and over again until the speed of this generation of examples is normal.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6057/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6057/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/6055
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6055/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6055/comments
https://api.github.com/repos/huggingface/datasets/issues/6055/events
https://github.com/huggingface/datasets/issues/6055
1,813,524,145
I_kwDODunzps5sGC6x
6,055
Fix host URL in The Pile datasets
{ "login": "nickovchinnikov", "id": 7540752, "node_id": "MDQ6VXNlcjc1NDA3NTI=", "avatar_url": "https://avatars.githubusercontent.com/u/7540752?v=4", "gravatar_id": "", "url": "https://api.github.com/users/nickovchinnikov", "html_url": "https://github.com/nickovchinnikov", "followers_url": "https://api.github.com/users/nickovchinnikov/followers", "following_url": "https://api.github.com/users/nickovchinnikov/following{/other_user}", "gists_url": "https://api.github.com/users/nickovchinnikov/gists{/gist_id}", "starred_url": "https://api.github.com/users/nickovchinnikov/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nickovchinnikov/subscriptions", "organizations_url": "https://api.github.com/users/nickovchinnikov/orgs", "repos_url": "https://api.github.com/users/nickovchinnikov/repos", "events_url": "https://api.github.com/users/nickovchinnikov/events{/privacy}", "received_events_url": "https://api.github.com/users/nickovchinnikov/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[]
2023-07-20T09:08:52
2023-07-20T09:09:37
null
NONE
null
null
null
### Describe the bug In #3627 and #5543, you tried to fix the host URL in The Pile datasets. But both URLs are not working now: `HTTPError: 404 Client Error: Not Found for URL: https://the-eye.eu/public/AI/pile_preliminary_components/PUBMED_title_abstracts_2019_baseline.jsonl.zst` And `ConnectTimeout: HTTPSConnectionPool(host='mystic.the-eye.eu', port=443): Max retries exceeded with url: /public/AI/pile_preliminary_components/PUBMED_title_abstracts_2019_baseline.jsonl.zst (Caused by ConnectTimeoutError(, 'Connection to mystic.the-eye.eu timed out. (connect timeout=10.0)'))` ### Steps to reproduce the bug ``` from datasets import load_dataset # This takes a few minutes to run, so go grab a tea or coffee while you wait :) data_files = "https://mystic.the-eye.eu/public/AI/pile_preliminary_components/PUBMED_title_abstracts_2019_baseline.jsonl.zst" pubmed_dataset = load_dataset("json", data_files=data_files, split="train") pubmed_dataset ``` Result: `ConnectTimeout: HTTPSConnectionPool(host='mystic.the-eye.eu', port=443): Max retries exceeded with url: /public/AI/pile_preliminary_components/PUBMED_title_abstracts_2019_baseline.jsonl.zst (Caused by ConnectTimeoutError(, 'Connection to mystic.the-eye.eu timed out. (connect timeout=10.0)'))` And ``` from datasets import load_dataset # This takes a few minutes to run, so go grab a tea or coffee while you wait :) data_files = "https://the-eye.eu/public/AI/pile_preliminary_components/PUBMED_title_abstracts_2019_baseline.jsonl.zst" pubmed_dataset = load_dataset("json", data_files=data_files, split="train") pubmed_dataset ``` Result: `HTTPError: 404 Client Error: Not Found for URL: https://the-eye.eu/public/AI/pile_preliminary_components/PUBMED_title_abstracts_2019_baseline.jsonl.zst` ### Expected behavior Downloading as normal. ### Environment info Environment info `datasets` version: 2.9.0 Platform: Windows Python version: 3.9.13
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6055/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6055/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/6054
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6054/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6054/comments
https://api.github.com/repos/huggingface/datasets/issues/6054/events
https://github.com/huggingface/datasets/issues/6054
1,813,271,304
I_kwDODunzps5sFFMI
6,054
Multi-processed `Dataset.map` slows down a lot when `import torch`
{ "login": "ShinoharaHare", "id": 47121592, "node_id": "MDQ6VXNlcjQ3MTIxNTky", "avatar_url": "https://avatars.githubusercontent.com/u/47121592?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ShinoharaHare", "html_url": "https://github.com/ShinoharaHare", "followers_url": "https://api.github.com/users/ShinoharaHare/followers", "following_url": "https://api.github.com/users/ShinoharaHare/following{/other_user}", "gists_url": "https://api.github.com/users/ShinoharaHare/gists{/gist_id}", "starred_url": "https://api.github.com/users/ShinoharaHare/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ShinoharaHare/subscriptions", "organizations_url": "https://api.github.com/users/ShinoharaHare/orgs", "repos_url": "https://api.github.com/users/ShinoharaHare/repos", "events_url": "https://api.github.com/users/ShinoharaHare/events{/privacy}", "received_events_url": "https://api.github.com/users/ShinoharaHare/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892865, "node_id": "MDU6TGFiZWwxOTM1ODkyODY1", "url": "https://api.github.com/repos/huggingface/datasets/labels/duplicate", "name": "duplicate", "color": "cfd3d7", "default": true, "description": "This issue or pull request already exists" } ]
closed
false
null
[]
null
[ "A duplicate of https://github.com/huggingface/datasets/issues/5929" ]
2023-07-20T06:36:14
2023-07-21T15:19:37
2023-07-21T15:19:37
NONE
null
null
null
### Describe the bug When using `Dataset.map` with `num_proc > 1`, the speed slows down much if I add `import torch` to the start of the script even though I don't use it. I'm not sure if it's `torch` only or if any other package that is "large" will also cause the same result. BTW, `import lightning` also slows it down. Below are the progress bars of `Dataset.map`, the only difference between them is with or without `import torch`, but the speed varies by 6-7 times. - without `import torch` ![image](https://github.com/huggingface/datasets/assets/47121592/0233055a-ced4-424a-9f0f-32a2afd802c2) - with `import torch` ![image](https://github.com/huggingface/datasets/assets/47121592/463eafb7-b81e-4eb9-91ca-fd7fe20f3d59) ### Steps to reproduce the bug Below is the code I used, but I don't think the dataset and the mapping function have much to do with the phenomenon. ```python3 from datasets import load_from_disk, disable_caching from transformers import AutoTokenizer # import torch # import lightning def rearrange_datapoints( batch, tokenizer, sequence_length, ): datapoints = [] input_ids = [] for x in batch['input_ids']: input_ids += x while len(input_ids) >= sequence_length: datapoint = input_ids[:sequence_length] datapoints.append(datapoint) input_ids[:sequence_length] = [] if input_ids: paddings = [-1] * (sequence_length - len(input_ids)) datapoint = paddings + input_ids if tokenizer.padding_side == 'left' else input_ids + paddings datapoints.append(datapoint) batch['input_ids'] = datapoints return batch if __name__ == '__main__': disable_caching() tokenizer = AutoTokenizer.from_pretrained('...', use_fast=False) dataset = load_from_disk('...') dataset = dataset.map( rearrange_datapoints, fn_kwargs=dict( tokenizer=tokenizer, sequence_length=2048, ), batched=True, num_proc=8, ) ``` ### Expected behavior The multi-processed `Dataset.map` function speed between with and without `import torch` should be the same. ### Environment info - `datasets` version: 2.13.1 - Platform: Linux-3.10.0-1127.el7.x86_64-x86_64-with-glibc2.31 - Python version: 3.10.11 - Huggingface_hub version: 0.14.1 - PyArrow version: 12.0.0 - Pandas version: 2.0.1
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6054/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6054/timeline
null
completed
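The issue above was closed as a duplicate of #5929, and this thread records no fix. One commonly suggested mitigation for multi-process `.map` slowdowns after importing heavyweight libraries, offered purely as an assumption rather than the documented resolution, is to cap the thread pools those libraries create before the worker processes are forked:

```python
import os

# Assumption: torch's import initializes thread pools that every forked .map()
# worker inherits and contends over; capping them is a common mitigation, not
# the fix recorded in the linked issue.
os.environ.setdefault("OMP_NUM_THREADS", "1")

import torch
torch.set_num_threads(1)

from datasets import load_from_disk  # the rest of the reporter's script is unchanged
```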
https://api.github.com/repos/huggingface/datasets/issues/6053
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6053/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6053/comments
https://api.github.com/repos/huggingface/datasets/issues/6053/events
https://github.com/huggingface/datasets/issues/6053
1,812,635,902
I_kwDODunzps5sCqD-
6,053
Change package name from "datasets" to something less generic
{ "login": "geajack", "id": 2124157, "node_id": "MDQ6VXNlcjIxMjQxNTc=", "avatar_url": "https://avatars.githubusercontent.com/u/2124157?v=4", "gravatar_id": "", "url": "https://api.github.com/users/geajack", "html_url": "https://github.com/geajack", "followers_url": "https://api.github.com/users/geajack/followers", "following_url": "https://api.github.com/users/geajack/following{/other_user}", "gists_url": "https://api.github.com/users/geajack/gists{/gist_id}", "starred_url": "https://api.github.com/users/geajack/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/geajack/subscriptions", "organizations_url": "https://api.github.com/users/geajack/orgs", "repos_url": "https://api.github.com/users/geajack/repos", "events_url": "https://api.github.com/users/geajack/events{/privacy}", "received_events_url": "https://api.github.com/users/geajack/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
null
[]
null
[ "This would break a lot of existing code, so we can't really do this." ]
2023-07-19T19:53:28
2023-10-03T16:04:09
2023-10-03T16:04:09
NONE
null
null
null
### Feature request I'm repeatedly finding myself in situations where I want to have a package called `datasets.py` or `evaluate.py` in my code and can't because those names are being taken up by Huggingface packages. While I can understand how (even from the user's perspective) it's aesthetically pleasing to have nice terse library names, ultimately a library hogging simple names like this is something I find short-sighted, impractical and at my most irritable, frankly rude. My preference would be a pattern like what you get with all the other big libraries like numpy or pandas: ``` import huggingface as hf # hf.transformers, hf.datasets, hf.evaluate ``` or things like ``` import huggingface.transformers as tf # tf.load_model(), etc ``` If this isn't possible for some technical reason, at least just call the packages something like `hf_transformers` and so on. I realize this is a very big change that's probably been discussed internally already, but I'm making this issue and sister issues on each huggingface project just to start the conversation and begin tracking community feeling on the matter, since I suspect I'm not the only one who feels like this. Sorry if this has been requested already on this issue tracker, I couldn't find anything looking for terms like "package name". Sister issues: - [transformers](https://github.com/huggingface/transformers/issues/24934) - **datasets** - [evaluate](https://github.com/huggingface/evaluate/issues/476) ### Motivation Not taking up package names the user is likely to want to use. ### Your contribution No - more a matter of internal discussion among core library authors.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6053/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6053/timeline
null
not_planned
https://api.github.com/repos/huggingface/datasets/issues/6051
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6051/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6051/comments
https://api.github.com/repos/huggingface/datasets/issues/6051/events
https://github.com/huggingface/datasets/issues/6051
1,811,549,650
I_kwDODunzps5r-g3S
6,051
Skipping shard in the remote repo and resume upload
{ "login": "rs9000", "id": 9029817, "node_id": "MDQ6VXNlcjkwMjk4MTc=", "avatar_url": "https://avatars.githubusercontent.com/u/9029817?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rs9000", "html_url": "https://github.com/rs9000", "followers_url": "https://api.github.com/users/rs9000/followers", "following_url": "https://api.github.com/users/rs9000/following{/other_user}", "gists_url": "https://api.github.com/users/rs9000/gists{/gist_id}", "starred_url": "https://api.github.com/users/rs9000/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rs9000/subscriptions", "organizations_url": "https://api.github.com/users/rs9000/orgs", "repos_url": "https://api.github.com/users/rs9000/repos", "events_url": "https://api.github.com/users/rs9000/events{/privacy}", "received_events_url": "https://api.github.com/users/rs9000/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi! `_select_contiguous` fetches a (zero-copy) slice of the dataset's Arrow table to build a shard, so I don't think this part is the problem. To me, the issue seems to be the step where we embed external image files' bytes (a lot of file reads). You can use `.map` with multiprocessing to perform this step before `push_to_hub` in a faster manner and cache it to disk:\r\n```python\r\nfrom datasets.table import embed_table_storage\r\n# load_dataset(...)\r\nformat = dataset.format\r\ndataset = dataset.with_format(\"arrow\")\r\ndataset = dataset.map(embed_table_storage, batched=True)\r\ndataset = dataset.with_format(**format)\r\n# push_to_hub(...)\r\n```\r\n\r\n(In Datasets 3.0, these external bytes will be written to an Arrow file when generating a dataset to avoid this \"embed\" step)", "Hi, thanks, this solution saves some time.\r\nBut can't we avoid embedding all external image files bytes with each push, skipping the images that have already been pushed into the repo?\r\n\r\nEdit: Ok I missed the part of cache it manually on the disk the first time, this solves the problem. Thank you" ]
2023-07-19T09:25:26
2023-07-20T18:16:01
2023-07-20T18:16:00
NONE
null
null
null
### Describe the bug
When I try to resume the upload of my dataset, it takes a very long time to reach the index of the shard from which to resume uploading. From my understanding, the problem is in this part of the code in `arrow_dataset.py`:

```python
for index, shard in logging.tqdm(
    enumerate(itertools.chain([first_shard], shards_iter)),
    desc="Pushing dataset shards to the dataset hub",
    total=num_shards,
    disable=not logging.is_progress_bar_enabled(),
):
    shard_path_in_repo = path_in_repo(index, shard)
    # Upload a shard only if it doesn't already exist in the repository
    if shard_path_in_repo not in data_files:
```

In particular, iterating the generator is slow during this call:

```python
self._select_contiguous(start, length, new_fingerprint=new_fingerprint)
```

I wonder if it is possible to avoid calling this function for shards that are already uploaded and just start from the correct shard index.

### Steps to reproduce the bug
1. Start the upload

```python
dataset = load_dataset("imagefolder", data_dir=DATA_DIR, split="train", drop_labels=True)
dataset.push_to_hub("repo/name")
```

2. Stop and restart the upload after hundreds of shards

### Expected behavior
Skip the already-uploaded shards faster.

### Environment info
- `datasets` version: 2.5.1
- Platform: Linux-4.18.0-193.el8.x86_64-x86_64-with-glibc2.17
- Python version: 3.8.16
- PyArrow version: 12.0.1
- Pandas version: 2.0.2
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6051/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6051/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/6048
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6048/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6048/comments
https://api.github.com/repos/huggingface/datasets/issues/6048/events
https://github.com/huggingface/datasets/issues/6048
1,809,629,346
I_kwDODunzps5r3MCi
6,048
When I use datasets.load_dataset, I encounter an HTTP connection error!
{ "login": "yangy1992", "id": 137855591, "node_id": "U_kgDOCDeCZw", "avatar_url": "https://avatars.githubusercontent.com/u/137855591?v=4", "gravatar_id": "", "url": "https://api.github.com/users/yangy1992", "html_url": "https://github.com/yangy1992", "followers_url": "https://api.github.com/users/yangy1992/followers", "following_url": "https://api.github.com/users/yangy1992/following{/other_user}", "gists_url": "https://api.github.com/users/yangy1992/gists{/gist_id}", "starred_url": "https://api.github.com/users/yangy1992/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yangy1992/subscriptions", "organizations_url": "https://api.github.com/users/yangy1992/orgs", "repos_url": "https://api.github.com/users/yangy1992/repos", "events_url": "https://api.github.com/users/yangy1992/events{/privacy}", "received_events_url": "https://api.github.com/users/yangy1992/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "The `audiofolder` loader is not available in version `2.3.2`, hence the error. Please run the `pip install -U datasets` command to update the `datasets` installation to make `load_dataset(\"audiofolder\", ...)` work." ]
2023-07-18T10:16:34
2023-07-18T16:18:39
2023-07-18T16:18:39
NONE
null
null
null
### Describe the bug
`common_voice_test = load_dataset("audiofolder", data_dir="./dataset/", cache_dir="./cache", split=datasets.Split.TEST)`

When I run the code above, I get the error below:

--------------------------------------------
ConnectionError: Couldn't reach https://raw.githubusercontent.com/huggingface/datasets/2.3.2/datasets/audiofolder/audiofolder.py (ConnectionError(MaxRetryError("HTTPSConnectionPool(host='raw.githubusercontent.com', port=443): Max retries exceeded with url: /huggingface/datasets/2.3.2/datasets/audiofolder/audiofolder.py (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7f299ed082e0>: Failed to establish a new connection: [Errno 101] Network is unreachable'))")))
--------------------------------------------------

All my data is on the local machine, so why does it need to connect to the internet? How can I fix this, given that my machine cannot connect to the internet?

### Steps to reproduce the bug
1

### Expected behavior
No error when I use the `load_dataset` function.

### Environment info
python=3.8.15
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6048/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6048/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/6046
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6046/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6046/comments
https://api.github.com/repos/huggingface/datasets/issues/6046/events
https://github.com/huggingface/datasets/issues/6046
1,808,154,414
I_kwDODunzps5rxj8u
6,046
Support proxy and user-agent in fsspec calls
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" }, { "id": 3761482852, "node_id": "LA_kwDODunzps7gM6xk", "url": "https://api.github.com/repos/huggingface/datasets/labels/good%20second%20issue", "name": "good second issue", "color": "BDE59C", "default": false, "description": "Issues a bit more difficult than \"Good First\" issues" } ]
open
false
{ "login": "zutarich", "id": 95092167, "node_id": "U_kgDOBar9xw", "avatar_url": "https://avatars.githubusercontent.com/u/95092167?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zutarich", "html_url": "https://github.com/zutarich", "followers_url": "https://api.github.com/users/zutarich/followers", "following_url": "https://api.github.com/users/zutarich/following{/other_user}", "gists_url": "https://api.github.com/users/zutarich/gists{/gist_id}", "starred_url": "https://api.github.com/users/zutarich/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zutarich/subscriptions", "organizations_url": "https://api.github.com/users/zutarich/orgs", "repos_url": "https://api.github.com/users/zutarich/repos", "events_url": "https://api.github.com/users/zutarich/events{/privacy}", "received_events_url": "https://api.github.com/users/zutarich/received_events", "type": "User", "site_admin": false }
[ { "login": "zutarich", "id": 95092167, "node_id": "U_kgDOBar9xw", "avatar_url": "https://avatars.githubusercontent.com/u/95092167?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zutarich", "html_url": "https://github.com/zutarich", "followers_url": "https://api.github.com/users/zutarich/followers", "following_url": "https://api.github.com/users/zutarich/following{/other_user}", "gists_url": "https://api.github.com/users/zutarich/gists{/gist_id}", "starred_url": "https://api.github.com/users/zutarich/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zutarich/subscriptions", "organizations_url": "https://api.github.com/users/zutarich/orgs", "repos_url": "https://api.github.com/users/zutarich/repos", "events_url": "https://api.github.com/users/zutarich/events{/privacy}", "received_events_url": "https://api.github.com/users/zutarich/received_events", "type": "User", "site_admin": false } ]
null
[ "hii @lhoestq can you assign this issue to me?\r\n", "You can reply \"#self-assign\" to this issue to automatically get assigned to it :)\r\nLet me know if you have any questions or if I can help", "#2289 ", "Actually i am quite new to figure it out how everything goes and done \r\n\r\n> You can reply \"#self-assign\" to this issue to automatically get assigned to it :)\r\n> Let me know if you have any questions or if I can help\r\n\r\nwhen i wrote #self-assign it automatically got converted to some number is it correct or i have done it some wrong way, I am quite new to open source thus wanna try to learn and explore it", "#2289 #self-assign ", "Ah yea github tries to replace the #self-assign with an issue link. I guess you can try to copy-paste instead to see if it works\r\n\r\nAnyway let me assign you manually", "thanks a lot @lhoestq ! though i have a very lil idea of the issue, i am new. as i said before, but gonna try my best shot to do it.\r\ncan you please suggest some tips or anything from your side, how basically we approach it will be really helpfull.\r\nWill try my best!", "The HfFileSystem from the `huggingface_hub` package can already read the HTTP_PROXY and HTTPS_PROXY environment variables. So the remaining thing missing is the `user_agent` that the user may include in a `DownloadConfig` object. The user agent can be used for regular http calls but also calls to the HfFileSystem.\r\n\r\n- for http, the `user_agent` isn't passed from `DownloadConfig` to `get_datasets_user_agent` in `_prepare_single_hop_path_and_storage_options` in `streaming_download_manager.py` so we need to include it\r\n- for HfFileSystem I think it requires a PR in https://github.com/huggingface/huggingface_hub to include it in the `HfFileSystem.__init__`" ]
2023-07-17T16:39:26
2023-10-09T13:49:14
null
MEMBER
null
null
null
Since we switched to the new HfFileSystem we no longer apply user's proxy and user-agent. Using the HTTP_PROXY and HTTPS_PROXY environment variables works though since we use aiohttp to call the HF Hub. This can be implemented in `_prepare_single_hop_path_and_storage_options`. Though ideally the `HfFileSystem` could support passing at least the proxies
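For reference, a minimal sketch of the environment-variable workaround mentioned above (the proxy address is a placeholder, not a real endpoint; the variables can equally be exported in the shell before launching the script):

```python
import os

# HTTP_PROXY / HTTPS_PROXY are picked up by the aiohttp-based calls to the Hub,
# so setting them is a stopgap until DownloadConfig.proxies and the user-agent
# are passed through to the HfFileSystem.
os.environ["HTTP_PROXY"] = "http://127.0.0.1:8080"   # hypothetical proxy
os.environ["HTTPS_PROXY"] = "http://127.0.0.1:8080"  # hypothetical proxy

from datasets import load_dataset

ds = load_dataset("rotten_tomatoes", split="train")
```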
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6046/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6046/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/6043
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6043/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6043/comments
https://api.github.com/repos/huggingface/datasets/issues/6043/events
https://github.com/huggingface/datasets/issues/6043
1,807,771,750
I_kwDODunzps5rwGhm
6,043
Compression kwargs have no effect when saving datasets as csv
{ "login": "exs-avianello", "id": 128361578, "node_id": "U_kgDOB6akag", "avatar_url": "https://avatars.githubusercontent.com/u/128361578?v=4", "gravatar_id": "", "url": "https://api.github.com/users/exs-avianello", "html_url": "https://github.com/exs-avianello", "followers_url": "https://api.github.com/users/exs-avianello/followers", "following_url": "https://api.github.com/users/exs-avianello/following{/other_user}", "gists_url": "https://api.github.com/users/exs-avianello/gists{/gist_id}", "starred_url": "https://api.github.com/users/exs-avianello/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/exs-avianello/subscriptions", "organizations_url": "https://api.github.com/users/exs-avianello/orgs", "repos_url": "https://api.github.com/users/exs-avianello/repos", "events_url": "https://api.github.com/users/exs-avianello/events{/privacy}", "received_events_url": "https://api.github.com/users/exs-avianello/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[ "Hello @exs-avianello, I have reproduced the bug successfully and have understood the problem. But I am confused regarding this part of the statement, \"`pandas.DataFrame.to_csv` is always called with a buf-like `path_or_buf`\".\r\n\r\nCan you please elaborate on it?\r\n\r\nThanks!", "Hi @aryanxk02 ! Sure, what I actually meant is that when passing a path-like `path_or_buf` here\r\n\r\nhttps://github.com/huggingface/datasets/blob/14f6edd9222e577dccb962ed5338b79b73502fa5/src/datasets/arrow_dataset.py#L4708-L4714 \r\n\r\nit gets converted to a file object behind the scenes here\r\n\r\nhttps://github.com/huggingface/datasets/blob/14f6edd9222e577dccb962ed5338b79b73502fa5/src/datasets/io/csv.py#L92-L94\r\n\r\nand the eventual pandas `.to_csv()` calls that write to it always get `path_or_buf=None`, making pandas ignore the `compression` kwarg in the `to_csv_kwargs`\r\n\r\nhttps://github.com/huggingface/datasets/blob/14f6edd9222e577dccb962ed5338b79b73502fa5/src/datasets/io/csv.py#L107-L109", "@exs-avianello When `path_or_buf` is set to None, the `to_csv()` method will return the CSV data as a string instead of saving it to a file. Hence the compression doesn't take place. I think setting `path_or_buf=self.path_or_buf` should work. What you say?" ]
2023-07-17T13:19:21
2023-07-22T17:34:18
null
NONE
null
null
null
### Describe the bug Attempting to save a dataset as a compressed csv file, the compression kwargs provided to `.to_csv()` that get piped to panda's `pandas.DataFrame.to_csv` do not have any effect - resulting in the dataset not getting compressed. A warning is raised if explicitly providing a `compression` kwarg, but no warnings are raised if relying on the defaults. This can lead to datasets secretly not getting compressed for users expecting the behaviour to match panda's `.to_csv()`, where the compression format is automatically inferred from the destination path suffix. ### Steps to reproduce the bug ```python # dataset is not compressed (but at least a warning is emitted) import datasets dataset = datasets.load_dataset("rotten_tomatoes", split="train") dataset.to_csv("uncompressed.csv") print(os.path.getsize("uncompressed.csv")) # 1008607 dataset.to_csv("compressed.csv.gz", compression={'method': 'gzip', 'compresslevel': 1, 'mtime': 1}) print(os.path.getsize("compressed.csv.gz")) # 1008607 ``` ```shell >>> RuntimeWarning: compression has no effect when passing a non-binary object as input. csv_str = batch.to_pandas().to_csv( ``` ```python # dataset is not compressed and no warnings are emitted dataset.to_csv("compressed.csv.gz") print(os.path.getsize("compressed.csv.gz")) # 1008607 # compare with dataset.to_pandas().to_csv("pandas.csv.gz") print(os.path.getsize("pandas.csv.gz")) # 418561 ``` --- I think that this is because behind the scenes `pandas.DataFrame.to_csv` is always called with a buf-like `path_or_buf`, but users that are providing a path-like to `datasets.Dataset.to_csv` are likely not to expect / know that - leading to a mismatch in their understanding of the expected behaviour of the `compression` kwarg. ### Expected behavior The dataset to be saved as a compressed csv file when providing a `compression` kwarg, or when relying on the default `compression='infer'` ### Environment info `datasets == 2.13.1`
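As a possible interim workaround (a sketch, not a fix of the underlying behaviour), the compression can be handled explicitly by writing batches through a gzip file object with pandas, which the report above confirms does compress:

```python
import gzip

from datasets import load_dataset

dataset = load_dataset("rotten_tomatoes", split="train")

# Write the CSV through an explicitly gzip-compressed file object, batch by batch,
# so the whole table never has to be materialized as a single pandas DataFrame.
with gzip.open("compressed.csv.gz", "wt", encoding="utf-8") as f:
    for start in range(0, len(dataset), 10_000):
        batch = dataset.select(range(start, min(start + 10_000, len(dataset))))
        batch.to_pandas().to_csv(f, index=False, header=(start == 0))
```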
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6043/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6043/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/6039
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6039/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6039/comments
https://api.github.com/repos/huggingface/datasets/issues/6039/events
https://github.com/huggingface/datasets/issues/6039
1,806,508,451
I_kwDODunzps5rrSGj
6,039
Loading column subset from parquet file produces error since version 2.13
{ "login": "kklemon", "id": 1430243, "node_id": "MDQ6VXNlcjE0MzAyNDM=", "avatar_url": "https://avatars.githubusercontent.com/u/1430243?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kklemon", "html_url": "https://github.com/kklemon", "followers_url": "https://api.github.com/users/kklemon/followers", "following_url": "https://api.github.com/users/kklemon/following{/other_user}", "gists_url": "https://api.github.com/users/kklemon/gists{/gist_id}", "starred_url": "https://api.github.com/users/kklemon/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kklemon/subscriptions", "organizations_url": "https://api.github.com/users/kklemon/orgs", "repos_url": "https://api.github.com/users/kklemon/repos", "events_url": "https://api.github.com/users/kklemon/events{/privacy}", "received_events_url": "https://api.github.com/users/kklemon/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2023-07-16T09:13:07
2023-07-24T14:35:04
2023-07-24T14:35:04
NONE
null
null
null
### Describe the bug `load_dataset` allows loading a subset of columns from a parquet file with the `columns` argument. Since version 2.13, this produces the following error: ``` Traceback (most recent call last): File "/usr/lib/python3.10/site-packages/datasets/builder.py", line 1879, in _prepare_split_single for _, table in generator: File "/usr/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py", line 68, in _generate_tables raise ValueError( ValueError: Tried to load parquet data with columns '['sepal_length']' with mismatching features '{'sepal_length': Value(dtype='float64', id=None), 'sepal_width': Value(dtype='float64', id=None), 'petal_length': Value(dtype='float64', id=None), 'petal_width': Value(dtype='float64', id=None), 'species': Value(dtype='string', id=None)}' ``` This seems to occur because `datasets` is checking whether the columns in the schema exactly match the provided list of columns, instead of whether they are a subset. ### Steps to reproduce the bug ```python # Prepare some sample data import pandas as pd iris = pd.read_csv('https://raw.githubusercontent.com/mwaskom/seaborn-data/master/iris.csv') iris.to_parquet('iris.parquet') # ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species'] print(iris.columns) # Load data with datasets from datasets import load_dataset # Load full parquet file dataset = load_dataset('parquet', data_files='iris.parquet') # Load column subset; throws error for datasets>=2.13 dataset = load_dataset('parquet', data_files='iris.parquet', columns=['sepal_length']) ``` ### Expected behavior No error should be thrown and the given column subset should be loaded. ### Environment info - `datasets` version: 2.13.0 - Platform: Linux-5.15.0-76-generic-x86_64-with-glibc2.35 - Python version: 3.10.9 - Huggingface_hub version: 0.16.4 - PyArrow version: 12.0.1 - Pandas version: 1.5.3
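Until this is fixed, a workaround (a sketch; it assumes a `datasets` version recent enough to provide `Dataset.select_columns`) is to load the full file and then drop the unwanted columns:

```python
from datasets import load_dataset

# Load all columns, then keep only the ones needed.
dataset = load_dataset("parquet", data_files="iris.parquet", split="train")
dataset = dataset.select_columns(["sepal_length"])
# Alternatively:
# dataset = dataset.remove_columns(["sepal_width", "petal_length", "petal_width", "species"])
```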
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6039/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6039/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/6038
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6038/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6038/comments
https://api.github.com/repos/huggingface/datasets/issues/6038/events
https://github.com/huggingface/datasets/issues/6038
1,805,960,244
I_kwDODunzps5rpMQ0
6,038
File "/home/zhizhou/anaconda3/envs/pytorch/lib/python3.10/site-packages/datasets/builder.py", line 992, in _download_and_prepare if str(split_generator.split_info.name).lower() == "all": AttributeError: 'str' object has no attribute 'split_info'. Did you mean: 'splitlines'?
{ "login": "BaiMeiyingxue", "id": 53547009, "node_id": "MDQ6VXNlcjUzNTQ3MDA5", "avatar_url": "https://avatars.githubusercontent.com/u/53547009?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BaiMeiyingxue", "html_url": "https://github.com/BaiMeiyingxue", "followers_url": "https://api.github.com/users/BaiMeiyingxue/followers", "following_url": "https://api.github.com/users/BaiMeiyingxue/following{/other_user}", "gists_url": "https://api.github.com/users/BaiMeiyingxue/gists{/gist_id}", "starred_url": "https://api.github.com/users/BaiMeiyingxue/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BaiMeiyingxue/subscriptions", "organizations_url": "https://api.github.com/users/BaiMeiyingxue/orgs", "repos_url": "https://api.github.com/users/BaiMeiyingxue/repos", "events_url": "https://api.github.com/users/BaiMeiyingxue/events{/privacy}", "received_events_url": "https://api.github.com/users/BaiMeiyingxue/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Instead of writing the loading script, you can use the built-in loader to [load JSON files](https://huggingface.co/docs/datasets/loading#json):\r\n```python\r\nfrom datasets import load_dataset\r\nds = load_dataset(\"json\", data_files={\"train\": os.path.join(data_dir[\"train\"]), \"dev\": os.path.join(data_dir[\"dev\"])})\r\n```" ]
2023-07-15T07:58:08
2023-07-24T11:54:15
2023-07-24T11:54:15
NONE
null
null
null
Hi, I use the code below to load a local file:

```
def _split_generators(self, dl_manager):
    # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
    # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name

    # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
    # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
    # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
    # urls = _URLS[self.config.name]
    data_dir = dl_manager.download_and_extract(_URLs)
    print(data_dir)
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            # These kwargs will be passed to _generate_examples
            gen_kwargs={
                "filepath": os.path.join(data_dir["train"]),
                "split": "train",
            },
        ),
        datasets.SplitGenerator(
            name=datasets.Split.VALIDATION,
            # These kwargs will be passed to _generate_examples
            gen_kwargs={
                "filepath": os.path.join(data_dir["dev"]),
                "split": "dev",
            },
        ),
    ]
```

and this error occurred:

```
Traceback (most recent call last):
  File "/home/zhizhou/data1/zhanghao/huggingface/FineTuning_Transformer/load_local_dataset.py", line 2, in <module>
    dataset = load_dataset("./QA_script.py", data_files='/home/zhizhou/.cache/huggingface/datasets/conversatiom_corps/part_file.json')
  File "/home/zhizhou/anaconda3/envs/pytorch/lib/python3.10/site-packages/datasets/load.py", line 1809, in load_dataset
    builder_instance.download_and_prepare(
  File "/home/zhizhou/anaconda3/envs/pytorch/lib/python3.10/site-packages/datasets/builder.py", line 909, in download_and_prepare
    self._download_and_prepare(
  File "/home/zhizhou/anaconda3/envs/pytorch/lib/python3.10/site-packages/datasets/builder.py", line 1670, in _download_and_prepare
    super()._download_and_prepare(
  File "/home/zhizhou/anaconda3/envs/pytorch/lib/python3.10/site-packages/datasets/builder.py", line 992, in _download_and_prepare
    if str(split_generator.split_info.name).lower() == "all":
AttributeError: 'str' object has no attribute 'split_info'. Did you mean: 'splitlines'?
```

Could you help me?
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6038/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6038/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/6037
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6037/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6037/comments
https://api.github.com/repos/huggingface/datasets/issues/6037/events
https://github.com/huggingface/datasets/issues/6037
1,805,887,184
I_kwDODunzps5ro6bQ
6,037
Documentation links to examples are broken
{ "login": "david-waterworth", "id": 5028974, "node_id": "MDQ6VXNlcjUwMjg5NzQ=", "avatar_url": "https://avatars.githubusercontent.com/u/5028974?v=4", "gravatar_id": "", "url": "https://api.github.com/users/david-waterworth", "html_url": "https://github.com/david-waterworth", "followers_url": "https://api.github.com/users/david-waterworth/followers", "following_url": "https://api.github.com/users/david-waterworth/following{/other_user}", "gists_url": "https://api.github.com/users/david-waterworth/gists{/gist_id}", "starred_url": "https://api.github.com/users/david-waterworth/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/david-waterworth/subscriptions", "organizations_url": "https://api.github.com/users/david-waterworth/orgs", "repos_url": "https://api.github.com/users/david-waterworth/repos", "events_url": "https://api.github.com/users/david-waterworth/events{/privacy}", "received_events_url": "https://api.github.com/users/david-waterworth/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "These docs are outdated (version 1.2.1 is over two years old). Please refer to [this](https://huggingface.co/docs/datasets/dataset_script) version instead.\r\n\r\nInitially, we hosted datasets in this repo, but now you can find them [on the HF Hub](https://huggingface.co/datasets) (e.g. the [`ag_news`](https://huggingface.co/datasets/ag_news/blob/main/ag_news.py) script)", "Sorry I thought I'd selected the latest version." ]
2023-07-15T04:54:50
2023-07-17T22:35:14
2023-07-17T15:10:32
NONE
null
null
null
### Describe the bug The links at the bottom of [add_dataset](https://huggingface.co/docs/datasets/v1.2.1/add_dataset.html) to examples of specific datasets are all broken, for example - text classification: [ag_news](https://github.com/huggingface/datasets/blob/master/datasets/ag_news/ag_news.py) (original data are in csv files) ### Steps to reproduce the bug Click on links to examples from latest documentation ### Expected behavior Links should be up to date - it might be more stable to link to https://huggingface.co/datasets/ag_news/blob/main/ag_news.py ### Environment info dataset v1.2.1
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6037/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6037/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/6034
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6034/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6034/comments
https://api.github.com/repos/huggingface/datasets/issues/6034/events
https://github.com/huggingface/datasets/issues/6034
1,804,501,361
I_kwDODunzps5rjoFx
6,034
load_dataset hangs on WSL
{ "login": "Andy-Zhou2", "id": 20140522, "node_id": "MDQ6VXNlcjIwMTQwNTIy", "avatar_url": "https://avatars.githubusercontent.com/u/20140522?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Andy-Zhou2", "html_url": "https://github.com/Andy-Zhou2", "followers_url": "https://api.github.com/users/Andy-Zhou2/followers", "following_url": "https://api.github.com/users/Andy-Zhou2/following{/other_user}", "gists_url": "https://api.github.com/users/Andy-Zhou2/gists{/gist_id}", "starred_url": "https://api.github.com/users/Andy-Zhou2/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Andy-Zhou2/subscriptions", "organizations_url": "https://api.github.com/users/Andy-Zhou2/orgs", "repos_url": "https://api.github.com/users/Andy-Zhou2/repos", "events_url": "https://api.github.com/users/Andy-Zhou2/events{/privacy}", "received_events_url": "https://api.github.com/users/Andy-Zhou2/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Even if a dataset is cached, we still make requests to check whether the cache is up-to-date. [This](https://huggingface.co/docs/datasets/v2.13.1/en/loading#offline) section in the docs explains how to avoid them and directly load the cached version.", "Thanks - that works! However it doesn't resolve the original issue (but I am not sure if it is a WSL problem)", "We use `requests` to make HTTP requests (and `aiohttp` in the streaming mode), so I don't think we can provide much help regarding the socket issue (it probably has something to do with WSL). " ]
2023-07-14T09:03:10
2023-07-14T14:48:29
2023-07-14T14:48:29
NONE
null
null
null
### Describe the bug
`load_dataset` simply hangs. It happens roughly once every 5 runs and, interestingly, hangs for a multiple of 5 minutes (5/10/15 minutes). Using the profiler in PyCharm shows that the time is spent in `<method 'connect' of '_socket.socket' objects>`. However, a local cache is available, so I am not sure why a socket connection is needed. ([profiler result](https://ibb.co/0Btbbp8))

It only happens on WSL for me. It works on native Windows and on my MacBook (the cache is quickly recognized and loaded within a second).

### Steps to reproduce the bug
I am using Ubuntu 22.04.2 LTS (GNU/Linux 5.15.90.1-microsoft-standard-WSL2 x86_64)

Python 3.10.10 (main, Mar 21 2023, 18:45:11) [GCC 11.2.0] on linux
>>> import datasets
>>> datasets.load_dataset('ai2_arc', 'ARC-Challenge') # hangs for 5/10/15 minutes

### Expected behavior
The cache should be quickly recognized and loaded within a second.

### Environment info
Please let me know if I should provide more environment information.
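Following the offline-loading docs referenced in the comments, the network calls can be skipped entirely once the dataset is cached; a minimal sketch:

```python
import os

# Must be set before `datasets` is imported, since the flag is read at import time.
os.environ["HF_DATASETS_OFFLINE"] = "1"

import datasets

# Loads straight from the local cache without trying to reach the Hub.
ds = datasets.load_dataset("ai2_arc", "ARC-Challenge")
```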
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6034/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6034/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/6033
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6033/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6033/comments
https://api.github.com/repos/huggingface/datasets/issues/6033/events
https://github.com/huggingface/datasets/issues/6033
1,804,482,051
I_kwDODunzps5rjjYD
6,033
`map` function doesn't fully utilize `input_columns`.
{ "login": "kwonmha", "id": 8953934, "node_id": "MDQ6VXNlcjg5NTM5MzQ=", "avatar_url": "https://avatars.githubusercontent.com/u/8953934?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kwonmha", "html_url": "https://github.com/kwonmha", "followers_url": "https://api.github.com/users/kwonmha/followers", "following_url": "https://api.github.com/users/kwonmha/following{/other_user}", "gists_url": "https://api.github.com/users/kwonmha/gists{/gist_id}", "starred_url": "https://api.github.com/users/kwonmha/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kwonmha/subscriptions", "organizations_url": "https://api.github.com/users/kwonmha/orgs", "repos_url": "https://api.github.com/users/kwonmha/repos", "events_url": "https://api.github.com/users/kwonmha/events{/privacy}", "received_events_url": "https://api.github.com/users/kwonmha/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2023-07-14T08:49:28
2023-07-14T09:16:04
2023-07-14T09:16:04
NONE
null
null
null
### Describe the bug
I wanted to select only some columns of my data, and I thought that is what the `input_columns` argument is for. What I expected is this: if the dataset has the columns ["a", "b", "c", "d"] and I set `input_columns=["a", "d"]`, the resulting data will have only the ["a", "d"] columns. But it doesn't select columns; it preserves the existing ones.

The main cause is the `update` call on the `transformed_batch` dictionary:

https://github.com/huggingface/datasets/blob/682d21e94ab1e64c11b583de39dc4c93f0101c5a/src/datasets/iterable_dataset.py#L687-L691

`transformed_batch` gets all the columns via `transformed_batch = dict(batch)`. Even though `function_args` selects `input_columns`, `update` preserves the columns other than `input_columns`. I think it should build a new dictionary with only the columns in `input_columns`, like this:

```
# transformed_batch = dict(batch)
# transformed_batch.update(self.function(*function_args, **self.fn_kwargs)
# This is what I think is correct.
transformed_batch = self.function(*function_args, **self.fn_kwargs)
```

Let me know how `input_columns` is meant to be used.

### Steps to reproduce the bug
Described above.

### Expected behavior
Described above.

### Environment info
datasets: 2.12
python: 3.8
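If the goal is to actually end up with only a subset of columns, a sketch of a way to do it today (using the hypothetical column names from the example above) is to drop the unwanted columns explicitly rather than relying on `input_columns`:

```python
# `remove_columns` drops the listed columns from the mapped output,
# so only "a", "d" (plus whatever the function returns) remain.
ds = ds.map(tokenize, batched=True, remove_columns=["b", "c"])
```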
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6033/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6033/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/6032
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6032/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6032/comments
https://api.github.com/repos/huggingface/datasets/issues/6032/events
https://github.com/huggingface/datasets/issues/6032
1,804,358,679
I_kwDODunzps5rjFQX
6,032
DownloadConfig.proxies does not work when load_dataset_builder calls HfApi.dataset_info
{ "login": "codingl2k1", "id": 138426806, "node_id": "U_kgDOCEA5tg", "avatar_url": "https://avatars.githubusercontent.com/u/138426806?v=4", "gravatar_id": "", "url": "https://api.github.com/users/codingl2k1", "html_url": "https://github.com/codingl2k1", "followers_url": "https://api.github.com/users/codingl2k1/followers", "following_url": "https://api.github.com/users/codingl2k1/following{/other_user}", "gists_url": "https://api.github.com/users/codingl2k1/gists{/gist_id}", "starred_url": "https://api.github.com/users/codingl2k1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/codingl2k1/subscriptions", "organizations_url": "https://api.github.com/users/codingl2k1/orgs", "repos_url": "https://api.github.com/users/codingl2k1/repos", "events_url": "https://api.github.com/users/codingl2k1/events{/privacy}", "received_events_url": "https://api.github.com/users/codingl2k1/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[ "`HfApi` comes from the `huggingface_hub` package. You can use [this](https://huggingface.co/docs/huggingface_hub/v0.16.3/en/package_reference/utilities#huggingface_hub.configure_http_backend) utility to change the `huggingface_hub`'s `Session` proxies (see the example).\r\n\r\nWe plan to implement https://github.com/huggingface/datasets/issues/5080 and make this behavior more consistent eventually.", "> this\r\n\r\nThanks. I will try `huggingface_hub.configure_http_backend` to change session's config.", "@mariosasko are you saying if I do the following:\r\n\r\n```\r\ndef backend_factory() -> requests.Session:\r\n session = requests.Session()\r\n session.proxies = {\r\n \"https\": \"127.0.0.1:8887\",\r\n \"http\": \"127.0.0.1:8887\",\r\n }\r\n session.verify = \"/etc/ssl/certs/ca-certificates.crt\"\r\n return session\r\n\r\n# Set it as the default session factory\r\nconfigure_http_backend(backend_factory=backend_factory)\r\n```\r\n\r\nwhich works nicely with transformer library:\r\n\r\n```\r\ndef download_gpt_2_model():\r\n tokenizer = GPT2Tokenizer.from_pretrained(\r\n \"gpt2\", force_download=True, resume_download=False\r\n )\r\n text = \"Replace me by any text you'd like.\"\r\n encoded_input = tokenizer(text, return_tensors=\"pt\")\r\n print(encoded_input)\r\n\r\n model = GPT2Model.from_pretrained(\r\n \"gpt2\", force_download=True, resume_download=False\r\n )\r\n output = model(**encoded_input)\r\n```\r\n\r\nshould work for datasets library as well ?\r\n\r\nIn my case if I just do:\r\n\r\n```\r\ndef download_sts12_sts_dataset():\r\n dataset = load_dataset(\r\n \"mteb/sts12-sts\",\r\n download_mode=\"force_redownload\",\r\n verification_mode=\"basic_checks\",\r\n revision=\"main\",\r\n )\r\n\r\n```\r\nI am getting:\r\n`ConnectionError: Couldn't reach https://huggingface.co/datasets/mteb/sts12-sts/resolve/main/dataset_infos.json (ConnectTimeout(MaxRetryError(\"HTTPSConnectionPool(host='huggingface.co', port=443): Max retries exceeded with url: /datasets/mteb/sts12-sts/resolve/main/dataset_infos.json (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7f429e87a3a0>, 'Connection to huggingface.co timed out. (connect timeout=100)'))\")))`\r\n\r\nwhich is typical when the proxy server is not defined. Looks like what is set in configure_http_backend(backend_factory=backend_factory) is ignore.\r\n\r\nIf I use env variable instead, it is working \r\n```\r\ndef download_sts12_sts_dataset():\r\n\r\n os.environ[\"https_proxy\"] = \"127.0.0.1:8887\"\r\n os.environ[\"http_proxy\"] = \"127.0.0.1:8887\"\r\n os.environ[\"REQUESTS_CA_BUNDLE\"] = \"/etc/ssl/certs/ca-certificates.crt\"\r\n\r\n dataset = load_dataset(\r\n \"mteb/sts12-sts\",\r\n download_mode=\"force_redownload\",\r\n verification_mode=\"basic_checks\",\r\n revision=\"main\",\r\n )\r\n```\r\n\r\nShould I add something ?\r\n\r\nI am using `huggingface_hub 0.15.1`, `datasets 2.13.0`, `transformers 4.30.2`", "`huggingface_hub.configure_http_backend` works for `transformers` because they only use the `huggingface_hub` lib for downloads. Our download logic is a bit more complex (e.g., we also support downloading non-Hub files), so we are not aligned with them yet. In the meantime, it's best to use the env vars.", "@mariosasko I fully understand that the logic for dataset is different. I see 2 issues with the current implementation of the env variables:\r\n\r\n- having the same https_proxy/http_prox/no_proxy env variables for all tools is not good in some case. For example I have 2 differents proxy server. 
In 2019 we had discussion with the Tensorflow teams and they recommended to do the following: TFDS_HTTP_PROXY, TFDS_HTTPS_PROXY ...\r\n- with recent version of requests, it is not possible to deactivate TLS interception (verify=false) by using env variable. This is useful to debug things and in some case TLS is not working and you need to ignore verifying the SSL certificate (probably not recommended) \r\n\r\nOne of the best way is to able to pass our requests.Session() directly\r\n```\r\nimport openai\r\nsession = requests.Session()\r\nsession.cert = CERT\r\nsession.verify = False\r\nopenai.requestssession = session\r\n```\r\n\r\nMy 2 cents in this discussion" ]
2023-07-14T07:22:55
2023-09-11T13:50:41
null
NONE
null
null
null
### Describe the bug
```python
download_config = DownloadConfig(proxies={'https': '<my proxy>'})
builder = load_dataset_builder(..., download_config=download_config)
```

However, when getting the dataset_info from HfApi, the HTTP requests do not use the proxies.

### Steps to reproduce the bug
1. Set up proxies in DownloadConfig.
2. Call `load_dataset_builder` with download_config.
3. Inspect the call stack in HfApi.dataset_info.

![image](https://github.com/huggingface/datasets/assets/138426806/33e538a8-2e22-4e63-b634-343febe5324b)

### Expected behavior
DownloadConfig.proxies should also apply when getting dataset_info.

### Environment info
https://github.com/huggingface/datasets/commit/406b2212263c0d33f267e35b917f410ff6b3bc00
Python 3.11.4
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6032/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6032/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/6031
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6031/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6031/comments
https://api.github.com/repos/huggingface/datasets/issues/6031/events
https://github.com/huggingface/datasets/issues/6031
1,804,183,858
I_kwDODunzps5riaky
6,031
Argument type for map function changes when using `input_columns` for `IterableDataset`
{ "login": "kwonmha", "id": 8953934, "node_id": "MDQ6VXNlcjg5NTM5MzQ=", "avatar_url": "https://avatars.githubusercontent.com/u/8953934?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kwonmha", "html_url": "https://github.com/kwonmha", "followers_url": "https://api.github.com/users/kwonmha/followers", "following_url": "https://api.github.com/users/kwonmha/following{/other_user}", "gists_url": "https://api.github.com/users/kwonmha/gists{/gist_id}", "starred_url": "https://api.github.com/users/kwonmha/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kwonmha/subscriptions", "organizations_url": "https://api.github.com/users/kwonmha/orgs", "repos_url": "https://api.github.com/users/kwonmha/repos", "events_url": "https://api.github.com/users/kwonmha/events{/privacy}", "received_events_url": "https://api.github.com/users/kwonmha/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Yes, this is intended." ]
2023-07-14T05:11:14
2023-07-14T14:44:15
2023-07-14T14:44:15
NONE
null
null
null
### Describe the bug
I wrote a `tokenize(examples)` function as an argument for the `map` function of an `IterableDataset`. It processes a dictionary-type `examples` parameter and is used as `train_dataset = train_dataset.map(tokenize, batched=True)`. No error is raised.

Then I found some unnecessary keys and values in `examples`, so I added the `input_columns` argument to the `map` function to select keys and values. This gives me an error saying:

```
TypeError: tokenize() takes 1 positional argument but 3 were given.
```

The code below is what matters:

https://github.com/huggingface/datasets/blob/406b2212263c0d33f267e35b917f410ff6b3bc00/src/datasets/iterable_dataset.py#L687

For example, take `inputs = {"a":1, "b":2, "c":3}`. If `self.input_columns` is `None`, `inputs` is a dictionary and `function_args` becomes a `list` containing a single `dict`: `function_args` becomes `[{"a":1, "b":2, "c":3}]`. Otherwise, say `self.input_columns = ["a", "c"]`; then `[inputs[col] for col in self.input_columns]` results in `[1, 3]`. I think it should be `[{"a":1, "c":3}]`.

I want to ask whether the resulting format is intended. Maybe I can modify `tokenize()` to take 2 parameters in this case instead of 1 dictionary, but this is confusing to me. Or it could be fixed as `[{col: inputs[col] for col in self.input_columns}]`.

### Steps to reproduce the bug
Run the `map` function of an `IterableDataset` with the `input_columns` argument.

### Expected behavior
`function_args` would be better off keeping the same format. I think it should be `[{"a":1, "c":3}]`.

### Environment info
dataset version: 2.12
python: 3.8
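Given the reply in the comments that this behaviour is intended, a sketch of a mapped function whose signature matches it (using the hypothetical columns "a" and "c" from the example above):

```python
# With input_columns=["a", "c"] and batched=True, the function receives the
# selected column values positionally (one argument per column, each a list),
# instead of a single dict of all columns.
def tokenize(a, c):
    return {"a_plus_c": [x + y for x, y in zip(a, c)]}

train_dataset = train_dataset.map(tokenize, batched=True, input_columns=["a", "c"])
```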
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6031/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6031/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/6025
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6025/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6025/comments
https://api.github.com/repos/huggingface/datasets/issues/6025/events
https://github.com/huggingface/datasets/issues/6025
1,801,852,601
I_kwDODunzps5rZha5
6,025
Using a dataset for a use other than it was intended for.
{ "login": "surya-narayanan", "id": 17240858, "node_id": "MDQ6VXNlcjE3MjQwODU4", "avatar_url": "https://avatars.githubusercontent.com/u/17240858?v=4", "gravatar_id": "", "url": "https://api.github.com/users/surya-narayanan", "html_url": "https://github.com/surya-narayanan", "followers_url": "https://api.github.com/users/surya-narayanan/followers", "following_url": "https://api.github.com/users/surya-narayanan/following{/other_user}", "gists_url": "https://api.github.com/users/surya-narayanan/gists{/gist_id}", "starred_url": "https://api.github.com/users/surya-narayanan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/surya-narayanan/subscriptions", "organizations_url": "https://api.github.com/users/surya-narayanan/orgs", "repos_url": "https://api.github.com/users/surya-narayanan/repos", "events_url": "https://api.github.com/users/surya-narayanan/events{/privacy}", "received_events_url": "https://api.github.com/users/surya-narayanan/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "I've opened a PR with a fix. In the meantime, you can avoid the error by deleting `task_templates` with `dataset.info.task_templates = None` before the `interleave_datasets` call.\r\n` " ]
2023-07-12T22:33:17
2023-07-13T13:57:36
2023-07-13T13:57:36
NONE
null
null
null
### Describe the bug
Hi, I want to use the rotten tomatoes dataset for a task other than classification, but when I interleave the dataset, it throws ```'ValueError: Column label is not present in features.'```. It seems that the label column must be present in the dataset for some reason? Here is the full stack trace:

```
File "/home/suryahari/Vornoi/tryage-handoff-other-datasets.py", line 276, in create_dataloaders
    dataset = interleave_datasets(dsfold, stopping_strategy="all_exhausted")
  File "/home/suryahari/miniconda3/envs/vornoi/lib/python3.10/site-packages/datasets/combine.py", line 134, in interleave_datasets
    return _interleave_iterable_datasets(
  File "/home/suryahari/miniconda3/envs/vornoi/lib/python3.10/site-packages/datasets/iterable_dataset.py", line 1833, in _interleave_iterable_datasets
    info = DatasetInfo.from_merge([d.info for d in datasets])
  File "/home/suryahari/miniconda3/envs/vornoi/lib/python3.10/site-packages/datasets/info.py", line 275, in from_merge
    dataset_infos = [dset_info.copy() for dset_info in dataset_infos if dset_info is not None]
  File "/home/suryahari/miniconda3/envs/vornoi/lib/python3.10/site-packages/datasets/info.py", line 275, in <listcomp>
    dataset_infos = [dset_info.copy() for dset_info in dataset_infos if dset_info is not None]
  File "/home/suryahari/miniconda3/envs/vornoi/lib/python3.10/site-packages/datasets/info.py", line 378, in copy
    return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
  File "<string>", line 20, in __init__
  File "/home/suryahari/miniconda3/envs/vornoi/lib/python3.10/site-packages/datasets/info.py", line 208, in __post_init__
    self.task_templates = [
  File "/home/suryahari/miniconda3/envs/vornoi/lib/python3.10/site-packages/datasets/info.py", line 209, in <listcomp>
    template.align_with_features(self.features) for template in (self.task_templates)
  File "/home/suryahari/miniconda3/envs/vornoi/lib/python3.10/site-packages/datasets/tasks/text_classification.py", line 20, in align_with_features
    raise ValueError(f"Column {self.label_column} is not present in features.")
ValueError: Column label is not present in features.
```

### Steps to reproduce the bug
Delete the column `labels` from the `rotten_tomatoes` dataset, then try to interleave it with other datasets.

### Expected behavior
It should let me use the dataset with just the `text` field.

### Environment info
latest datasets library? I don't think this was an issue in earlier versions.
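For reference, a sketch of the workaround suggested in the comments (`other_dataset` is a placeholder for whatever dataset is being interleaved with):

```python
from datasets import load_dataset, interleave_datasets

ds = load_dataset("rotten_tomatoes", split="train")
ds = ds.remove_columns("label")

# Drop the stale text-classification task template so DatasetInfo.from_merge
# no longer tries to align it with the now-missing label column.
ds.info.task_templates = None

mixed = interleave_datasets([ds, other_dataset], stopping_strategy="all_exhausted")
```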
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6025/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6025/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/6022
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6022/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6022/comments
https://api.github.com/repos/huggingface/datasets/issues/6022/events
https://github.com/huggingface/datasets/issues/6022
1,800,092,589
I_kwDODunzps5rSzut
6,022
Batch map raises TypeError: '>=' not supported between instances of 'NoneType' and 'int'
{ "login": "codingl2k1", "id": 138426806, "node_id": "U_kgDOCEA5tg", "avatar_url": "https://avatars.githubusercontent.com/u/138426806?v=4", "gravatar_id": "", "url": "https://api.github.com/users/codingl2k1", "html_url": "https://github.com/codingl2k1", "followers_url": "https://api.github.com/users/codingl2k1/followers", "following_url": "https://api.github.com/users/codingl2k1/following{/other_user}", "gists_url": "https://api.github.com/users/codingl2k1/gists{/gist_id}", "starred_url": "https://api.github.com/users/codingl2k1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/codingl2k1/subscriptions", "organizations_url": "https://api.github.com/users/codingl2k1/orgs", "repos_url": "https://api.github.com/users/codingl2k1/repos", "events_url": "https://api.github.com/users/codingl2k1/events{/privacy}", "received_events_url": "https://api.github.com/users/codingl2k1/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Thanks for reporting! I've opened a PR with a fix." ]
2023-07-12T03:20:17
2023-07-12T16:18:06
2023-07-12T16:18:05
NONE
null
null
null
### Describe the bug When mapping some datasets with `batched=True`, datasets may raise an exeception: ```python Traceback (most recent call last): File "/Users/codingl2k1/Work/datasets/venv/lib/python3.11/site-packages/multiprocess/pool.py", line 125, in worker result = (True, func(*args, **kwds)) ^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/src/datasets/utils/py_utils.py", line 1328, in _write_generator_to_queue for i, result in enumerate(func(**kwargs)): File "/Users/codingl2k1/Work/datasets/src/datasets/arrow_dataset.py", line 3483, in _map_single writer.write_batch(batch) File "/Users/codingl2k1/Work/datasets/src/datasets/arrow_writer.py", line 549, in write_batch array = cast_array_to_feature(col_values, col_type) if col_type is not None else col_values ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/src/datasets/table.py", line 1831, in wrapper return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks]) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/src/datasets/table.py", line 1831, in <listcomp> return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks]) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/src/datasets/table.py", line 2063, in cast_array_to_feature return feature.cast_storage(array) ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/src/datasets/features/features.py", line 1098, in cast_storage if min_max["max"] >= self.num_classes: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ TypeError: '>=' not supported between instances of 'NoneType' and 'int' The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/Users/codingl2k1/Work/datasets/t1.py", line 33, in <module> ds = ds.map(transforms, num_proc=14, batched=True, batch_size=5) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/src/datasets/dataset_dict.py", line 850, in map { File "/Users/codingl2k1/Work/datasets/src/datasets/dataset_dict.py", line 851, in <dictcomp> k: dataset.map( ^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/src/datasets/arrow_dataset.py", line 577, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/src/datasets/arrow_dataset.py", line 542, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/src/datasets/arrow_dataset.py", line 3179, in map for rank, done, content in iflatmap_unordered( File "/Users/codingl2k1/Work/datasets/src/datasets/utils/py_utils.py", line 1368, in iflatmap_unordered [async_result.get(timeout=0.05) for async_result in async_results] File "/Users/codingl2k1/Work/datasets/src/datasets/utils/py_utils.py", line 1368, in <listcomp> [async_result.get(timeout=0.05) for async_result in async_results] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/codingl2k1/Work/datasets/venv/lib/python3.11/site-packages/multiprocess/pool.py", line 774, in get raise self._value TypeError: '>=' not supported between instances of 'NoneType' and 'int' ``` ### Steps to reproduce the bug 1. Checkout the latest main of datasets. 2. 
Run the code: ```python from datasets import load_dataset def transforms(examples): # examples["pixel_values"] = [image.convert("RGB").resize((100, 100)) for image in examples["image"]] return examples ds = load_dataset("scene_parse_150") ds = ds.map(transforms, num_proc=14, batched=True, batch_size=5) print(ds) ``` ### Expected behavior map without exception. ### Environment info Datasets: https://github.com/huggingface/datasets/commit/b8067c0262073891180869f700ebef5ac3dc5cce Python: 3.11.4 System: Macos
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6022/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6022/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/6020
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6020/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6020/comments
https://api.github.com/repos/huggingface/datasets/issues/6020/events
https://github.com/huggingface/datasets/issues/6020
1,799,720,536
I_kwDODunzps5rRY5Y
6,020
Inconsistent "The features can't be aligned" error when combining map, multiprocessing, and variable length outputs
{ "login": "kheyer", "id": 38166299, "node_id": "MDQ6VXNlcjM4MTY2Mjk5", "avatar_url": "https://avatars.githubusercontent.com/u/38166299?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kheyer", "html_url": "https://github.com/kheyer", "followers_url": "https://api.github.com/users/kheyer/followers", "following_url": "https://api.github.com/users/kheyer/following{/other_user}", "gists_url": "https://api.github.com/users/kheyer/gists{/gist_id}", "starred_url": "https://api.github.com/users/kheyer/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kheyer/subscriptions", "organizations_url": "https://api.github.com/users/kheyer/orgs", "repos_url": "https://api.github.com/users/kheyer/repos", "events_url": "https://api.github.com/users/kheyer/events{/privacy}", "received_events_url": "https://api.github.com/users/kheyer/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[ "This scenario currently requires explicitly passing the target features (to avoid the error): \r\n```python\r\nimport datasets\r\n\r\n...\r\n\r\nfeatures = dataset.features\r\nfeatures[\"output\"] = = [{\"test\": datasets.Value(\"int64\")}]\r\ntest2 = dataset.map(lambda row, idx: test_func(row, idx), with_indices=True, num_proc=32, features=features)\r\n```", "I just encountered the same error in the same situation (multiprocessing with variable length outputs).\r\n\r\nThe funny (or dangerous?) thing is, that this error only showed up when testing with a small test dataset (16 examples, ValueError with `num_proc` >1) but the same code works fine for the full dataset (~70k examples).\r\n\r\n@mariosasko Any idea on how to do that with a nested feature with lists of variable lengths containing dicts?\r\n\r\nEDIT: Was able to narrow it down: >200 Examples: no error, <150 Examples: Error. \r\nNow idea what to make of this but pretty obvious that this is a bug....", "This error also occurs while concatenating the datasets." ]
2023-07-11T20:40:38
2024-02-10T19:24:29
null
NONE
null
null
null
### Describe the bug I'm using a dataset with map and multiprocessing to run a function that returned a variable length list of outputs. This output list may be empty. Normally this is handled fine, but there is an edge case that crops up when using multiprocessing. In some cases, an empty list result ends up in a dataset shard consisting of a single item. This results in a `The features can't be aligned` error that is difficult to debug because it depends on the number of processes/shards used. I've reproduced a minimal example below. My current workaround is to fill empty results with a dummy value that I filter after, but this was a weird error that took a while to track down. ### Steps to reproduce the bug ```python import datasets dataset = datasets.Dataset.from_list([{'idx':i} for i in range(60)]) def test_func(row, idx): if idx==58: return {'output': []} else: return {'output' : [{'test':1}, {'test':2}]} # this works fine test1 = dataset.map(lambda row, idx: test_func(row, idx), with_indices=True, num_proc=4) # this fails test2 = dataset.map(lambda row, idx: test_func(row, idx), with_indices=True, num_proc=32) >ValueError: The features can't be aligned because the key output of features {'idx': Value(dtype='int64', id=None), 'output': Sequence(feature=Value(dtype='null', id=None), length=-1, id=None)} has unexpected type - Sequence(feature=Value(dtype='null', id=None), length=-1, id=None) (expected either [{'test': Value(dtype='int64', id=None)}] or Value("null"). ``` The error occurs during the check ```python _check_if_features_can_be_aligned([dset.features for dset in dsets]) ``` When the multiprocessing splitting lines up just right with the empty return value, one of the `dset` in `dsets` will have a single item with an empty list value, causing the error. ### Expected behavior Expected behavior is the result would be the same regardless of the `num_proc` value used. ### Environment info Datasets version 2.11.0 Python 3.9.16
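One possible workaround, adapted from the suggestion in the comments (a sketch rather than an official fix; it reuses `dataset` and `test_func` from the reproducer above and assumes the intended output type is the list-of-dict feature named in the error message):

```python
import datasets

# Spell out the target features explicitly so that shards containing only empty
# lists cannot infer a conflicting "null" type for the "output" column.
features = datasets.Features(
    {"idx": datasets.Value("int64"), "output": [{"test": datasets.Value("int64")}]}
)
test2 = dataset.map(test_func, with_indices=True, num_proc=32, features=features)
```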
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6020/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6020/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/6017
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6017/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6017/comments
https://api.github.com/repos/huggingface/datasets/issues/6017/events
https://github.com/huggingface/datasets/issues/6017
1,799,309,132
I_kwDODunzps5rP0dM
6,017
Switch to huggingface_hub's HfFileSystem
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
[]
2023-07-11T16:24:40
2023-07-17T17:01:01
2023-07-17T17:01:01
MEMBER
null
null
null
instead of the current datasets.filesystems.hffilesystem.HfFileSystem which can be slow in some cases related to https://github.com/huggingface/datasets/issues/5846 and https://github.com/huggingface/datasets/pull/5919
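For context, a minimal sketch of the `huggingface_hub` filesystem this issue proposes switching to (the repository name is only an illustrative example):

```python
from huggingface_hub import HfFileSystem

# fsspec-compatible filesystem backed by the Hugging Face Hub.
fs = HfFileSystem()
print(fs.ls("datasets/squad", detail=False))  # list the files of a dataset repository
```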
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6017/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6017/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/6014
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6014/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6014/comments
https://api.github.com/repos/huggingface/datasets/issues/6014/events
https://github.com/huggingface/datasets/issues/6014
1,798,213,816
I_kwDODunzps5rLpC4
6,014
Request to Share/Update Dataset Viewer Code
{ "login": "lilyorlilypad", "id": 105081034, "node_id": "U_kgDOBkNoyg", "avatar_url": "https://avatars.githubusercontent.com/u/105081034?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lilyorlilypad", "html_url": "https://github.com/lilyorlilypad", "followers_url": "https://api.github.com/users/lilyorlilypad/followers", "following_url": "https://api.github.com/users/lilyorlilypad/following{/other_user}", "gists_url": "https://api.github.com/users/lilyorlilypad/gists{/gist_id}", "starred_url": "https://api.github.com/users/lilyorlilypad/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lilyorlilypad/subscriptions", "organizations_url": "https://api.github.com/users/lilyorlilypad/orgs", "repos_url": "https://api.github.com/users/lilyorlilypad/repos", "events_url": "https://api.github.com/users/lilyorlilypad/events{/privacy}", "received_events_url": "https://api.github.com/users/lilyorlilypad/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892865, "node_id": "MDU6TGFiZWwxOTM1ODkyODY1", "url": "https://api.github.com/repos/huggingface/datasets/labels/duplicate", "name": "duplicate", "color": "cfd3d7", "default": true, "description": "This issue or pull request already exists" } ]
closed
false
null
[]
null
[ "Hi ! The huggingface/dataset-viewer code was not maintained anymore because we switched to a new dataset viewer that is deployed available for each dataset the Hugging Face website.\r\n\r\nWhat are you using this old repository for ?", "I think these parts are outdated:\r\n\r\n* https://github.com/huggingface/datasets-viewer/blob/8efad8eae313a891f713469983bf4c744786f26e/run.py#L126-L131\r\n* https://github.com/huggingface/datasets-viewer/blob/8efad8eae313a891f713469983bf4c744786f26e/run.py#L145-L150\r\n\r\nTo make the viewer work, the first one should be replaced with the following:\r\n```python\r\ndataset_module = datasets.load.dataset_module_factory(path)\r\nbuilder_cls = datasets.load.import_main_class(dataset_module.module_path)\r\nconfs = builder_cls.BUILDER_CONFIGS\r\n```\r\nAnd the second one:\r\n```python\r\ndataset_module = datasets.load.dataset_module_factory(path)\r\nbuilder_cls = datasets.load.import_main_class(dataset_module.module_path)\r\nif conf:\r\n builder_instance = builder_cls(name=conf, cache_dir=path if path_to_datasets is not None else None)\r\nelse:\r\n builder_instance = builder_cls(cache_dir=path if path_to_datasets is not None else None)\r\n```\r\n\r\nBut as @lhoestq suggested, it's better to use the `datasets-server` API nowadays to [fetch the rows](https://huggingface.co/docs/datasets-server/rows).", "> The dataset viewer on the Hugging Face website is incredibly useful\r\n\r\n@mariosasko i think @lilyorlilypad wants to run the new dataset-viewer, not the old one", "> wants to run the new dataset-viewer, not the old one\r\n\r\nThanks for the clarification for me. I do want to run the new dataset-viewer. ", "It should be possible to run it locally using the HF datasets-server API (docs [here](https://huggingface.co/docs/datasets-server)) but the front end part is not open source (yet ?)\r\n\r\nThe back-end is open source though if you're interested: https://github.com/huggingface/datasets-server\r\nIt automatically converts datasets on HF to Parquet, which is the format we use to power the viewer.", "the new frontend would probably be hard to open source, as is, as it's quite intertwined with the Hub's code.\r\n\r\nHowever, at some point it would be amazing to have a community-driven open source implementation of a frontend to datasets-server! ", "For the frontend viewer, see https://github.com/huggingface/datasets/issues/6139.\r\n\r\nAlso mentioned in https://github.com/huggingface/datasets-server/issues/213 and https://github.com/huggingface/datasets-server/issues/441\r\n\r\nClosing as a duplicate of https://github.com/huggingface/datasets/issues/6139" ]
2023-07-11T06:36:09
2023-09-25T12:01:27
2023-09-25T12:01:17
NONE
null
null
null
Overview: The repository (huggingface/datasets-viewer) was recently archived and when I tried to run the code, there was the error message "AttributeError: module 'datasets.load' has no attribute 'prepare_module'". I could not resolve the issue myself due to lack of documentation of that attribute. Request: I kindly request the sharing of the code responsible for the dataset preview functionality or help with resolving the error. The dataset viewer on the Hugging Face website is incredibly useful since it is compatible with different types of inputs. It allows users to find datasets that meet their needs more efficiently. If needed, I am willing to contribute to the project by testing, documenting, and providing feedback on the dataset viewer code. Thank you for considering this request, and I look forward to your response.
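As a hedged illustration of the alternative pointed to in the comments, preview rows can be fetched from the datasets-server API instead of running the archived viewer (endpoint and parameters as documented there; the dataset name is only an example):

```python
import requests

# Fetch the first few rows of a dataset from the datasets-server /rows endpoint.
params = {"dataset": "rotten_tomatoes", "config": "default", "split": "train", "offset": 0, "length": 5}
response = requests.get("https://datasets-server.huggingface.co/rows", params=params)
print(response.json()["rows"][0])
```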
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6014/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6014/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/6013
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6013/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6013/comments
https://api.github.com/repos/huggingface/datasets/issues/6013/events
https://github.com/huggingface/datasets/issues/6013
1,796,083,437
I_kwDODunzps5rDg7t
6,013
[FR] `map` should reuse unchanged columns from the previous dataset to avoid disk usage
{ "login": "NightMachinery", "id": 36224762, "node_id": "MDQ6VXNlcjM2MjI0NzYy", "avatar_url": "https://avatars.githubusercontent.com/u/36224762?v=4", "gravatar_id": "", "url": "https://api.github.com/users/NightMachinery", "html_url": "https://github.com/NightMachinery", "followers_url": "https://api.github.com/users/NightMachinery/followers", "following_url": "https://api.github.com/users/NightMachinery/following{/other_user}", "gists_url": "https://api.github.com/users/NightMachinery/gists{/gist_id}", "starred_url": "https://api.github.com/users/NightMachinery/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NightMachinery/subscriptions", "organizations_url": "https://api.github.com/users/NightMachinery/orgs", "repos_url": "https://api.github.com/users/NightMachinery/repos", "events_url": "https://api.github.com/users/NightMachinery/events{/privacy}", "received_events_url": "https://api.github.com/users/NightMachinery/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" }, { "id": 3761482852, "node_id": "LA_kwDODunzps7gM6xk", "url": "https://api.github.com/repos/huggingface/datasets/labels/good%20second%20issue", "name": "good second issue", "color": "BDE59C", "default": false, "description": "Issues a bit more difficult than \"Good First\" issues" } ]
open
false
null
[]
null
[ "You can use the `remove_columns` parameter in `map` to avoid duplicating the columns (and save disk space) and then concatenate the original dataset with the map result:\r\n```python\r\nfrom datasets import concatenate_datasets\r\n# dummy example\r\nds_new = ds.map(lambda x: {\"new_col\": x[\"col\"] + 2}, remove_columns=ds.column_names)\r\nds_combined = concatenate_datasets([ds, ds_new], axis=1)\r\n```\r\n\r\nDoing this automatically is hard to implement efficiently unless we know ahead of time which existing columns will be modified by a `map` transform. We have this info when `input_columns` are specified, so I think this is the only case we can optimize." ]
2023-07-10T06:42:20
2023-07-10T15:37:52
null
CONTRIBUTOR
null
null
null
### Feature request Currently, adding a new column with `map` causes all the data in the dataset to be duplicated and stored/cached on disk again. It should reuse unchanged columns. ### Motivation This allows having datasets with different columns that share some basic columns. Currently, such datasets would become too expensive to store, and one would need some kind of on-the-fly join, which also doesn't seem to be implemented. ### Your contribution _
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6013/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6013/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/6012
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6012/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6012/comments
https://api.github.com/repos/huggingface/datasets/issues/6012/events
https://github.com/huggingface/datasets/issues/6012
1,795,575,432
I_kwDODunzps5rBk6I
6,012
[FR] Transform Chaining, Lazy Mapping
{ "login": "NightMachinery", "id": 36224762, "node_id": "MDQ6VXNlcjM2MjI0NzYy", "avatar_url": "https://avatars.githubusercontent.com/u/36224762?v=4", "gravatar_id": "", "url": "https://api.github.com/users/NightMachinery", "html_url": "https://github.com/NightMachinery", "followers_url": "https://api.github.com/users/NightMachinery/followers", "following_url": "https://api.github.com/users/NightMachinery/following{/other_user}", "gists_url": "https://api.github.com/users/NightMachinery/gists{/gist_id}", "starred_url": "https://api.github.com/users/NightMachinery/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NightMachinery/subscriptions", "organizations_url": "https://api.github.com/users/NightMachinery/orgs", "repos_url": "https://api.github.com/users/NightMachinery/repos", "events_url": "https://api.github.com/users/NightMachinery/events{/privacy}", "received_events_url": "https://api.github.com/users/NightMachinery/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[ "You can use `with_transform` to get a new dataset object.\r\n\r\nSupport for lazy `map` has already been discussed [here](https://github.com/huggingface/datasets/issues/3385) a little bit. Personally, I'm not a fan, as this would make `map` even more complex. ", "> You can use `with_transform` to get a new dataset object.\r\n> \r\n> Support for lazy `map` has already been discussed [here](https://github.com/huggingface/datasets/issues/3385) a little bit. Personally, I'm not a fan, as this would make `map` even more complex.\r\n\r\nI read about IterableDataset, and it seems to have lazy mapping. But I can't figure out how to convert an IterableDataset into a normal one when needed.\r\n\r\n`with_transform` still does not chain AFAIU.", "> I read about IterableDataset, and it seems to have lazy mapping. But I can't figure out how to convert an IterableDataset into a normal one when needed.\r\n\r\nYou must cache an `IterableDataset` to disk to load it as a `Dataset`. One way to do this is with `Dataset.from_generator`:\r\n```python\r\nfrom functools import partial\r\nfrom datasets import Dataset\r\n\r\ndef gen_from_iterable_dataset(iterable_ds)\r\n yield from iterable_ds\r\n\r\nds = Dataset.from_generator(partial(gen_from_iterable_dataset, iterable_ds), features=iterable_ds.features})\r\n```\r\n\r\n> with_transform still does not chain AFAIU.\r\n\r\nYes, not supported yet - the solution is to combine the transforms into a single one.", "I wonder if it would be beneficial to have a dedicated method to do that ? Maybe a `.save_to_disk()` so that the user can reload the resulting dataset later ?", "> ```python\r\n> from functools import partial\r\n> from datasets import Dataset\r\n> \r\n> def gen_from_iterable_dataset(iterable_ds)\r\n> yield from iterable_ds\r\n> \r\n> ds = Dataset.from_generator(partial(gen_from_iterable_dataset, iterable_ds), features=iterable_ds.features})\r\n> ```\r\n\r\n@mariosasko With these complex mapping functions, what hash will be used to cache this dataset?\r\n", "The params passed to `Dataset.from_generator` will be used to compute the hash (`partial` encapsulates the `iterable_ds` value, so changing it will also change the hash)", "Hi, I think this feature would be very useful. I want to concatenate large datasets with heterogeneous columns. I dislike `map` since I don't want multiple copy of that datasets locally. I tried to use \"set_transform\" on each dataset to convert it to a standard features format, but `datasets.concatenate_datasets` ignores the updated format of the datasets.  A work around is to use `torch.utils.data.ConcatDataset`. Is there a neat way to do it using HF datasets?" ]
2023-07-09T21:40:21
2023-11-23T10:08:57
null
CONTRIBUTOR
null
null
null
### Feature request Currently, a `map` call processes and duplicates the whole dataset, which takes both time and disk space. The solution is to allow lazy mapping, which is essentially a saved chain of transforms that are applied on the fly whenever a slice of the dataset is requested. The API should look like `map`, as `set_transform` changes the current dataset while `map` returns another dataset. ### Motivation Lazy processing allows lower disk usage and faster experimentation. ### Your contribution _
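For reference, a small sketch of the laziness that already exists today through `IterableDataset` (mentioned in the comments); this illustrates current behavior, not the requested API:

```python
from datasets import load_dataset

# IterableDataset.map is applied on the fly while iterating, so calls can be chained
# without writing intermediate copies to disk.
ids = load_dataset("rotten_tomatoes", split="train", streaming=True)
ids = ids.map(lambda x: {"text": x["text"].lower()})
ids = ids.map(lambda x: {"n_chars": len(x["text"])})
print(next(iter(ids)))
```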
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6012/reactions", "total_count": 5, "+1": 5, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6012/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/6011
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6011/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6011/comments
https://api.github.com/repos/huggingface/datasets/issues/6011/events
https://github.com/huggingface/datasets/issues/6011
1,795,296,568
I_kwDODunzps5rAg04
6,011
Documentation: wiki_dpr Dataset has no metric_type for Faiss Index
{ "login": "YichiRockyZhang", "id": 29335344, "node_id": "MDQ6VXNlcjI5MzM1MzQ0", "avatar_url": "https://avatars.githubusercontent.com/u/29335344?v=4", "gravatar_id": "", "url": "https://api.github.com/users/YichiRockyZhang", "html_url": "https://github.com/YichiRockyZhang", "followers_url": "https://api.github.com/users/YichiRockyZhang/followers", "following_url": "https://api.github.com/users/YichiRockyZhang/following{/other_user}", "gists_url": "https://api.github.com/users/YichiRockyZhang/gists{/gist_id}", "starred_url": "https://api.github.com/users/YichiRockyZhang/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/YichiRockyZhang/subscriptions", "organizations_url": "https://api.github.com/users/YichiRockyZhang/orgs", "repos_url": "https://api.github.com/users/YichiRockyZhang/repos", "events_url": "https://api.github.com/users/YichiRockyZhang/events{/privacy}", "received_events_url": "https://api.github.com/users/YichiRockyZhang/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi! You can do `ds.get_index(\"embeddings\").faiss_index.metric_type` to get the metric type and then match the result with the FAISS metric [enum](https://github.com/facebookresearch/faiss/blob/43d86e30736ede853c384b24667fc3ab897d6ba9/faiss/MetricType.h#L22-L36) (should be L2).", "Ah! Thank you for pointing this out. FYI: the enum indicates it's using the inner product. Using `torch.inner` or `torch.dot` still produces a discrepancy compared to the built-in score. I think this is because of the compression/quantization that occurs with the FAISS index." ]
2023-07-09T08:30:19
2023-07-11T03:02:36
2023-07-11T03:02:36
NONE
null
null
null
### Describe the bug After loading `wiki_dpr` using: ```py ds = load_dataset(path='wiki_dpr', name='psgs_w100.multiset.compressed', split='train') print(ds.get_index("embeddings").metric_type) # prints nothing because the value is None ``` the index does not have a defined `metric_type`. This is an issue because I do not know how the `scores` are being computed for `get_nearest_examples()`. ### Steps to reproduce the bug System: Python 3.9.16, Transformers 4.30.2, WSL After loading `wiki_dpr` using: ```py ds = load_dataset(path='wiki_dpr', name='psgs_w100.multiset.compressed', split='train') print(ds.get_index("embeddings").metric_type) # prints nothing because the value is None ``` the index does not have a defined `metric_type`. This is an issue because I do not know how the `scores` are being computed for `get_nearest_examples()`. ```py from transformers import DPRQuestionEncoder, DPRContextEncoder, DPRQuestionEncoderTokenizer, DPRContextEncoderTokenizer tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-multiset-base") encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-multiset-base") def encode_question(query, tokenizer=tokenizer, encoder=encoder): inputs = tokenizer(query, return_tensors='pt') question_embedding = encoder(**inputs)[0].detach().numpy() return question_embedding def get_knn(query, k=5, tokenizer=tokenizer, encoder=encoder, verbose=False): enc_question = encode_question(query, tokenizer, encoder) topk_results = ds.get_nearest_examples(index_name='embeddings', query=enc_question, k=k) a = torch.tensor(enc_question[0]).reshape(768) b = torch.tensor(topk_results.examples['embeddings'][0]) print(a.shape, b.shape) print(torch.dot(a, b)) print((a-b).pow(2).sum()) return topk_results ``` The [FAISS documentation](https://github.com/facebookresearch/faiss/wiki/MetricType-and-distances) suggests the metric is usually L2 distance (without the square root) or the inner product. I compute both for the sample query: ```py query = """ it catapulted into popular culture along with a line of action figures and other toys by Bandai.[2] By 2001, the media franchise had generated over $6 billion in toy sales. Despite initial criticism that its action violence targeted child audiences, the franchise has been commercially successful.""" get_knn(query,k=5) ``` Here, I get dot product of 80.6020 and L2 distance of 77.6616 and ```py NearestExamplesResults(scores=array([76.20431 , 75.312416, 74.945404, 74.866394, 74.68506 ], dtype=float32), examples={'id': ['3081096', '2004811', '8908258', '9594124', '286575'], 'text': ['actors, resulting in the "Power Rangers" franchise which has continued since then into sequel TV series (with "Power Rangers Beast Morphers" set to premiere in 2019), comic books, video games, and three feature films, with a further cinematic universe planned. Following from the success of "Power Rangers", Saban acquired the rights to more of Toei\'s library, creating "VR Troopers" and "Big Bad Beetleborgs" from several Metal Hero Series shows and "Masked Rider" from Kamen Rider Series footage. DIC Entertainment joined this boom by acquiring the rights to "Gridman the Hyper Agent" and turning it into "Superhuman Samurai Syber-Squad". In 2002,', ``` Doing `k=1` indicates the higher the outputted number, the better the match, so the metric should not be L2 distance. However, my manually computed inner product (80.6) has a discrepancy with the reported (76.2). 
Perhaps, this has to do with me using the `compressed` embeddings? ### Expected behavior ```py ds = load_dataset(path='wiki_dpr', name='psgs_w100.multiset.compressed', split='train') print(ds.get_index("embeddings").metric_type) # METRIC_INNER_PRODUCT ``` ### Environment info - `datasets` version: 2.12.0 - Platform: Linux-4.18.0-477.13.1.el8_8.x86_64-x86_64-with-glibc2.28 - Python version: 3.9.16 - Huggingface_hub version: 0.14.1 - PyArrow version: 12.0.0 - Pandas version: 2.0.1
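Following the resolution in the comments, a short sketch of how the metric can be read from the underlying FAISS index (assumes `faiss` is installed and reuses `ds` from the reproducer above):

```python
import faiss

# The index wrapper exposes the raw FAISS index; its metric_type maps to the FAISS enum.
faiss_index = ds.get_index("embeddings").faiss_index
print(faiss_index.metric_type == faiss.METRIC_INNER_PRODUCT)  # True => inner-product scores
```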
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6011/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6011/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/6010
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6010/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6010/comments
https://api.github.com/repos/huggingface/datasets/issues/6010/events
https://github.com/huggingface/datasets/issues/6010
1,793,838,152
I_kwDODunzps5q68xI
6,010
Improve `Dataset`'s string representation
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[ "I want to take a shot at this if possible ", "Yes, feel free to work on this.\r\n\r\nYou can check the PyArrow Table `__repr__` and Polars DataFrame `__repr__`/`_repr_html_` implementations for some pointers/ideas.", "@mariosasko are there any other similar issues that I could work on? I see this has been already solved. " ]
2023-07-07T16:38:03
2023-09-01T03:45:07
null
CONTRIBUTOR
null
null
null
Currently, `Dataset.__repr__` outputs a dataset's column names and the number of rows. We could improve it by printing its features and the first few rows. We should also implement `_repr_html_` to have a rich HTML representation in notebooks/Streamlit.
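A rough sketch of one possible direction, purely as an assumption about the implementation (rendering the first rows via pandas is not a decided design):

```python
import datasets

def _repr_html_(self):
    # Show the features plus the first rows as an HTML table in notebooks.
    preview = self.select(range(min(5, self.num_rows))).to_pandas()
    return f"<pre>{self.features}</pre>" + preview._repr_html_()

datasets.Dataset._repr_html_ = _repr_html_  # monkeypatch, for experimentation only
```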
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6010/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6010/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/6008
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6008/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6008/comments
https://api.github.com/repos/huggingface/datasets/issues/6008/events
https://github.com/huggingface/datasets/issues/6008
1,789,869,344
I_kwDODunzps5qrz0g
6,008
Dataset.from_generator consistently freezes at ~1000 rows
{ "login": "andreemic", "id": 27695722, "node_id": "MDQ6VXNlcjI3Njk1NzIy", "avatar_url": "https://avatars.githubusercontent.com/u/27695722?v=4", "gravatar_id": "", "url": "https://api.github.com/users/andreemic", "html_url": "https://github.com/andreemic", "followers_url": "https://api.github.com/users/andreemic/followers", "following_url": "https://api.github.com/users/andreemic/following{/other_user}", "gists_url": "https://api.github.com/users/andreemic/gists{/gist_id}", "starred_url": "https://api.github.com/users/andreemic/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/andreemic/subscriptions", "organizations_url": "https://api.github.com/users/andreemic/orgs", "repos_url": "https://api.github.com/users/andreemic/repos", "events_url": "https://api.github.com/users/andreemic/events{/privacy}", "received_events_url": "https://api.github.com/users/andreemic/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "By default, we write data to disk (so it can be memory-mapped) every 1000 rows/samples. You can control this with the `writer_batch_size` parameter. Also, when working with fixed-size arrays, the `ArrayXD` feature types yield better performance (e.g., in your case, `features=datasets.Features({\"i\": datasets.Array3D(shape=(512,512,3), dtype=\"float32\")})` should be faster).\r\n\r\nOur support for multi-dim arrays could be better, and we plan to improve it as part of https://github.com/huggingface/datasets/issues/5272.", "> By default, we write data to disk (so it can be memory-mapped) every 1000 rows/samples. You can control this with the `writer_batch_size` parameter. Also, when working with fixed-size arrays, the `ArrayXD` feature types yield better performance (e.g., in your case, `features=datasets.Features({\"i\": datasets.Array3D(shape=(512,512,3), dtype=\"float32\")})` should be faster).\r\n> \r\n> Our support for multi-dim arrays could be better, and we plan to improve it as part of #5272.\r\n\r\nThanks for the explanation! The Image array was just for demonstration, I use PIL Images in practice. Does that make a difference? What's the best approach for a dataset with PIL Images as rows?", "It's best to use the `datasets.Image()` feature type for PIL images (to save space) :)" ]
2023-07-05T16:06:48
2023-07-10T13:46:39
2023-07-10T13:46:39
NONE
null
null
null
### Describe the bug Whenever I try to create a dataset which contains images using `Dataset.from_generator`, it freezes around 996 rows. I suppose it has something to do with memory consumption, but there's more memory available. Somehow it worked a few times, but mostly this makes the datasets library much more cumbersome to work with because generators are the easiest way to turn an existing dataset into a Hugging Face dataset. I've let it run in the frozen state for way longer than it can possibly take to load the actual dataset. Let me know if you have ideas how to resolve it! ### Steps to reproduce the bug ```python from datasets import Dataset import numpy as np def gen(): for row in range(10000): yield {"i": np.random.rand(512, 512, 3)} Dataset.from_generator(gen) # -> 90% of the time gets stuck around 1000 rows ``` ### Expected behavior Should continue and go through all the examples yielded by the generator, or at least throw an error or somehow communicate what's going on. ### Environment info - `datasets` version: 2.8.0 - Platform: Linux-5.15.0-52-generic-x86_64-with-glibc2.29 - Python version: 3.8.10 - PyArrow version: 12.0.1 - Pandas version: 1.5.1
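A sketch of the mitigations described in the comments (the `writer_batch_size` parameter and the `Array3D` feature type come from that reply; the shapes mirror the toy generator above):

```python
import numpy as np
import datasets

def gen():
    for _ in range(10_000):
        yield {"i": np.random.rand(512, 512, 3).astype("float32")}

# Declare a fixed-shape array feature and flush to disk in smaller batches.
features = datasets.Features({"i": datasets.Array3D(shape=(512, 512, 3), dtype="float32")})
ds = datasets.Dataset.from_generator(gen, features=features, writer_batch_size=100)
```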
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6008/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6008/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/6007
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6007/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6007/comments
https://api.github.com/repos/huggingface/datasets/issues/6007/events
https://github.com/huggingface/datasets/issues/6007
1,789,782,693
I_kwDODunzps5qreql
6,007
Get an error "OverflowError: Python int too large to convert to C long" when loading a large dataset
{ "login": "silverriver", "id": 2529049, "node_id": "MDQ6VXNlcjI1MjkwNDk=", "avatar_url": "https://avatars.githubusercontent.com/u/2529049?v=4", "gravatar_id": "", "url": "https://api.github.com/users/silverriver", "html_url": "https://github.com/silverriver", "followers_url": "https://api.github.com/users/silverriver/followers", "following_url": "https://api.github.com/users/silverriver/following{/other_user}", "gists_url": "https://api.github.com/users/silverriver/gists{/gist_id}", "starred_url": "https://api.github.com/users/silverriver/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/silverriver/subscriptions", "organizations_url": "https://api.github.com/users/silverriver/orgs", "repos_url": "https://api.github.com/users/silverriver/repos", "events_url": "https://api.github.com/users/silverriver/events{/privacy}", "received_events_url": "https://api.github.com/users/silverriver/received_events", "type": "User", "site_admin": false }
[ { "id": 5705560427, "node_id": "LA_kwDODunzps8AAAABVBPxaw", "url": "https://api.github.com/repos/huggingface/datasets/labels/arrow", "name": "arrow", "color": "c2e0c6", "default": false, "description": "Related to Apache Arrow" } ]
open
false
null
[]
null
[ "This error means that one of the int32 (`Value(\"int32\")`) columns in the dataset has a value that is out of the valid (int32) range.\r\n\r\nI'll open a PR to print the name of a problematic column to make debugging such errors easier.", "I am afraid int32 is not the reason for this error.\r\n\r\nI have submitted a commit to use int64 for all ints in the dataset:\r\nhttps://huggingface.co/datasets/liwu/MNBVC/commit/857ac00d9eab96a6708ad6a82bd9001686042a9e\r\n\r\nand I have updated my env to the latest datasets release:\r\nCopy-and-paste the text below in your GitHub issue.\r\n\r\n- `datasets` version: 2.13.1\r\n- Platform: macOS-13.2.1-arm64-arm-64bit\r\n- Python version: 3.11.2\r\n- Huggingface_hub version: 0.13.4\r\n- PyArrow version: 11.0.0\r\n- Pandas version: 1.5.3\r\n\r\nBut the error still exist\r\n\r\n```\r\nDownloading and preparing dataset mnbvc/news_peoples_daily to /Users/silver/.cache/huggingface/datasets/liwu___mnbvc/news_peoples_daily/0.0.1/ee380f6309fe9b8b0d1fb14d77118f132444f22c8c4b28bf5c1645312688e051...\r\nDownloading data files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 12/12 [00:00<00:00, 9070.40it/s]\r\nExtracting data files: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 12/12 [00:00<00:00, 2697.16it/s]\r\n---------------------------------------------------------------------------\r\nOverflowError Traceback (most recent call last)\r\nFile ~/git/venv/lib/python3.11/site-packages/datasets/builder.py:1647, in GeneratorBasedBuilder._prepare_split_single(self, gen_kwargs, fpath, file_format, max_shard_size, split_info, check_duplicate_keys, job_id)\r\n 1646 example = self.info.features.encode_example(record) if self.info.features is not None else record\r\n-> 1647 writer.write(example, key)\r\n 1648 num_examples_progress_update += 1\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/datasets/arrow_writer.py:490, in ArrowWriter.write(self, example, key, writer_batch_size)\r\n 488 self.hkey_record = []\r\n--> 490 self.write_examples_on_file()\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/datasets/arrow_writer.py:448, in ArrowWriter.write_examples_on_file(self)\r\n 444 batch_examples[col] = [\r\n 445 row[0][col].to_pylist()[0] if isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) else row[0][col]\r\n 446 for row in self.current_examples\r\n 447 ]\r\n--> 448 self.write_batch(batch_examples=batch_examples)\r\n 449 self.current_examples = []\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/datasets/arrow_writer.py:553, in ArrowWriter.write_batch(self, batch_examples, writer_batch_size)\r\n 552 typed_sequence = OptimizedTypedSequence(col_values, type=col_type, try_type=col_try_type, col=col)\r\n--> 553 arrays.append(pa.array(typed_sequence))\r\n 554 inferred_features[col] = typed_sequence.get_inferred_type()\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/pyarrow/array.pxi:236, in pyarrow.lib.array()\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/pyarrow/array.pxi:110, in pyarrow.lib._handle_arrow_array_protocol()\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/datasets/arrow_writer.py:189, in TypedSequence.__arrow_array__(self, type)\r\n 188 trying_cast_to_python_objects = True\r\n--> 189 out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True))\r\n 190 # use smaller integer precisions if possible\r\n\r\nFile 
~/git/venv/lib/python3.11/site-packages/pyarrow/array.pxi:320, in pyarrow.lib.array()\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/pyarrow/array.pxi:39, in pyarrow.lib._sequence_to_array()\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/pyarrow/error.pxi:144, in pyarrow.lib.pyarrow_internal_check_status()\r\n\r\nOverflowError: Python int too large to convert to C long\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nOverflowError Traceback (most recent call last)\r\nFile ~/git/venv/lib/python3.11/site-packages/datasets/builder.py:1656, in GeneratorBasedBuilder._prepare_split_single(self, gen_kwargs, fpath, file_format, max_shard_size, split_info, check_duplicate_keys, job_id)\r\n 1655 num_shards = shard_id + 1\r\n-> 1656 num_examples, num_bytes = writer.finalize()\r\n 1657 writer.close()\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/datasets/arrow_writer.py:584, in ArrowWriter.finalize(self, close_stream)\r\n 583 self.hkey_record = []\r\n--> 584 self.write_examples_on_file()\r\n 585 # If schema is known, infer features even if no examples were written\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/datasets/arrow_writer.py:448, in ArrowWriter.write_examples_on_file(self)\r\n 444 batch_examples[col] = [\r\n 445 row[0][col].to_pylist()[0] if isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) else row[0][col]\r\n 446 for row in self.current_examples\r\n 447 ]\r\n--> 448 self.write_batch(batch_examples=batch_examples)\r\n 449 self.current_examples = []\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/datasets/arrow_writer.py:553, in ArrowWriter.write_batch(self, batch_examples, writer_batch_size)\r\n 552 typed_sequence = OptimizedTypedSequence(col_values, type=col_type, try_type=col_try_type, col=col)\r\n--> 553 arrays.append(pa.array(typed_sequence))\r\n 554 inferred_features[col] = typed_sequence.get_inferred_type()\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/pyarrow/array.pxi:236, in pyarrow.lib.array()\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/pyarrow/array.pxi:110, in pyarrow.lib._handle_arrow_array_protocol()\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/datasets/arrow_writer.py:189, in TypedSequence.__arrow_array__(self, type)\r\n 188 trying_cast_to_python_objects = True\r\n--> 189 out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True))\r\n 190 # use smaller integer precisions if possible\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/pyarrow/array.pxi:320, in pyarrow.lib.array()\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/pyarrow/array.pxi:39, in pyarrow.lib._sequence_to_array()\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/pyarrow/error.pxi:144, in pyarrow.lib.pyarrow_internal_check_status()\r\n\r\nOverflowError: Python int too large to convert to C long\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nDatasetGenerationError Traceback (most recent call last)\r\nCell In[2], line 1\r\n----> 1 dataset = load_dataset(\"liwu/MNBVC\", 'news_peoples_daily', split='train')\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/datasets/load.py:1809, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, num_proc, storage_options, **config_kwargs)\r\n 1806 try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES\r\n 1808 # Download and prepare data\r\n-> 1809 
builder_instance.download_and_prepare(\r\n 1810 download_config=download_config,\r\n 1811 download_mode=download_mode,\r\n 1812 verification_mode=verification_mode,\r\n 1813 try_from_hf_gcs=try_from_hf_gcs,\r\n 1814 num_proc=num_proc,\r\n 1815 storage_options=storage_options,\r\n 1816 )\r\n 1818 # Build dataset for splits\r\n 1819 keep_in_memory = (\r\n 1820 keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)\r\n 1821 )\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/datasets/builder.py:909, in DatasetBuilder.download_and_prepare(self, output_dir, download_config, download_mode, verification_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)\r\n 907 if num_proc is not None:\r\n 908 prepare_split_kwargs[\"num_proc\"] = num_proc\r\n--> 909 self._download_and_prepare(\r\n 910 dl_manager=dl_manager,\r\n 911 verification_mode=verification_mode,\r\n 912 **prepare_split_kwargs,\r\n 913 **download_and_prepare_kwargs,\r\n 914 )\r\n 915 # Sync info\r\n 916 self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/datasets/builder.py:1670, in GeneratorBasedBuilder._download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs)\r\n 1669 def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs):\r\n-> 1670 super()._download_and_prepare(\r\n 1671 dl_manager,\r\n 1672 verification_mode,\r\n 1673 check_duplicate_keys=verification_mode == VerificationMode.BASIC_CHECKS\r\n 1674 or verification_mode == VerificationMode.ALL_CHECKS,\r\n 1675 **prepare_splits_kwargs,\r\n 1676 )\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/datasets/builder.py:1004, in DatasetBuilder._download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs)\r\n 1000 split_dict.add(split_generator.split_info)\r\n 1002 try:\r\n 1003 # Prepare split will record examples associated to the split\r\n-> 1004 self._prepare_split(split_generator, **prepare_split_kwargs)\r\n 1005 except OSError as e:\r\n 1006 raise OSError(\r\n 1007 \"Cannot find data file. 
\"\r\n 1008 + (self.manual_download_instructions or \"\")\r\n 1009 + \"\\nOriginal error:\\n\"\r\n 1010 + str(e)\r\n 1011 ) from None\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/datasets/builder.py:1508, in GeneratorBasedBuilder._prepare_split(self, split_generator, check_duplicate_keys, file_format, num_proc, max_shard_size)\r\n 1506 job_id = 0\r\n 1507 with pbar:\r\n-> 1508 for job_id, done, content in self._prepare_split_single(\r\n 1509 gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args\r\n 1510 ):\r\n 1511 if done:\r\n 1512 result = content\r\n\r\nFile ~/git/venv/lib/python3.11/site-packages/datasets/builder.py:1665, in GeneratorBasedBuilder._prepare_split_single(self, gen_kwargs, fpath, file_format, max_shard_size, split_info, check_duplicate_keys, job_id)\r\n 1663 if isinstance(e, SchemaInferenceError) and e.__context__ is not None:\r\n 1664 e = e.__context__\r\n-> 1665 raise DatasetGenerationError(\"An error occurred while generating the dataset\") from e\r\n 1667 yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)\r\n\r\nDatasetGenerationError: An error occurred while generating the dataset\r\n```\r\n\r\nBesides, it works fine when I am using streamed dataset.", "`simhash` is the problematic column - it has values such as `18329103420363166823` that are out of the int64 range. You can fix this by setting the feature type to `Value(\"string\")` (it's advised to use this type for hash values in general)\r\n\r\n> Besides, it works fine when I am using streamed dataset.\r\n\r\nStreaming yields Python dictionaries from the script without converting them to the Arrow representation, as this conversion step is not that cheap performance-wise.", "i am using uint64 for simhash\r\n\r\nuint64 ranges up to about 3.69E19.\r\n\r\n18329103420363166823 is less than this value.\r\n\r\nmoreover, our simhash algorithm use 64 bits. it should fit in uint64.\r\n\r\n\r\n\r\n", "You are right. I overlooked the feature type.\r\n\r\nThis is a reproducer:\r\n```python\r\nimport pyarrow as pa\r\nfrom datasets.arrow_writer import TypedSequence\r\n\r\npa.array(TypedSequence([18329103420363166823], type=Value(\"uint64\")))\r\n```\r\n\r\n`pa.array([18329103420363166823])` also fails with the same error, so it seems PyArrow does not always infer the correct type as NumPy does (`uint64` in this case).\r\n\r\nI'll report this issue in the Arrow repo.\r\n\r\n`pa.array([18329103420363166823], pa.uint64)` works, so maybe we can implement a temporary fix (supporting complex input such as `[{\"image\": pil_image, \"num\": uint64_value}]` would be hard though).\r\n\r\nIn the meantime, you should be able to bypass this error by returning the `simhash` values as NumPy scalars in the script:\r\n```python\r\ndef _generate_examples(self, ...):\r\n ...\r\n yield {..., \"simhash\": np.uint64(simhash), ...}\r\n```", "Thank you for checking this issue in detail.\r\n\r\nHowever, it seems that using `np.uint64(simhash)` does not work. The same issue still exists.\r\n\r\nhttps://huggingface.co/datasets/liwu/MNBVC/commit/1e44f1e400b7e61052647d44c99cdae3bae9c830\r\n\r\nAnyway, we decide to use string type for these simhash values. Hope pyarrow can fix their bug soon.", "Arrow issue: https://github.com/apache/arrow/issues/36520", "May be something read your training data line by line.\r\nThen your training data just only one line. \r\nIt is so large.\r\nI guess.\r\n" ]
2023-07-05T15:16:50
2024-02-07T22:22:35
null
CONTRIBUTOR
null
null
null
### Describe the bug When load a large dataset with the following code ```python from datasets import load_dataset dataset = load_dataset("liwu/MNBVC", 'news_peoples_daily', split='train') ``` We encountered the error: "OverflowError: Python int too large to convert to C long" The error look something like: ``` OverflowError: Python int too large to convert to C long During handling of the above exception, another exception occurred: OverflowError Traceback (most recent call last) <ipython-input-7-0ed8700e662d> in <module> ----> 1 dataset = load_dataset("liwu/MNBVC", 'news_peoples_daily', split='train', cache_dir='/sfs/MNBVC/.cache/') /sfs/MNBVC/venv/lib64/python3.6/site-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, **config_kwargs) 1749 ignore_verifications=ignore_verifications, 1750 try_from_hf_gcs=try_from_hf_gcs, -> 1751 use_auth_token=use_auth_token, 1752 ) 1753 /sfs/MNBVC/venv/lib64/python3.6/site-packages/datasets/builder.py in download_and_prepare(self, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, **download_and_prepare_kwargs) 703 if not downloaded_from_gcs: 704 self._download_and_prepare( --> 705 dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs 706 ) 707 # Sync info /sfs/MNBVC/venv/lib64/python3.6/site-packages/datasets/builder.py in _download_and_prepare(self, dl_manager, verify_infos) 1225 1226 def _download_and_prepare(self, dl_manager, verify_infos): -> 1227 super()._download_and_prepare(dl_manager, verify_infos, check_duplicate_keys=verify_infos) 1228 1229 def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable: /sfs/MNBVC/venv/lib64/python3.6/site-packages/datasets/builder.py in _download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs) 791 try: 792 # Prepare split will record examples associated to the split --> 793 self._prepare_split(split_generator, **prepare_split_kwargs) 794 except OSError as e: 795 raise OSError( /sfs/MNBVC/venv/lib64/python3.6/site-packages/datasets/builder.py in _prepare_split(self, split_generator, check_duplicate_keys) 1219 writer.write(example, key) 1220 finally: -> 1221 num_examples, num_bytes = writer.finalize() 1222 1223 split_generator.split_info.num_examples = num_examples /sfs/MNBVC/venv/lib64/python3.6/site-packages/datasets/arrow_writer.py in finalize(self, close_stream) 536 # Re-intializing to empty list for next batch 537 self.hkey_record = [] --> 538 self.write_examples_on_file() 539 if self.pa_writer is None: 540 if self.schema: /sfs/MNBVC/venv/lib64/python3.6/site-packages/datasets/arrow_writer.py in write_examples_on_file(self) 407 # Since current_examples contains (example, key) tuples 408 batch_examples[col] = [row[0][col] for row in self.current_examples] --> 409 self.write_batch(batch_examples=batch_examples) 410 self.current_examples = [] 411 /sfs/MNBVC/venv/lib64/python3.6/site-packages/datasets/arrow_writer.py in write_batch(self, batch_examples, writer_batch_size) 506 col_try_type = try_features[col] if try_features is not None and col in try_features else None 507 typed_sequence = OptimizedTypedSequence(batch_examples[col], type=col_type, try_type=col_try_type, col=col) --> 508 arrays.append(pa.array(typed_sequence)) 509 inferred_features[col] = typed_sequence.get_inferred_type() 510 schema 
= inferred_features.arrow_schema if self.pa_writer is None else self.schema /sfs/MNBVC/venv/lib64/python3.6/site-packages/pyarrow/array.pxi in pyarrow.lib.array() /sfs/MNBVC/venv/lib64/python3.6/site-packages/pyarrow/array.pxi in pyarrow.lib._handle_arrow_array_protocol() /sfs/MNBVC/venv/lib64/python3.6/site-packages/datasets/arrow_writer.py in __arrow_array__(self, type) 180 else: 181 trying_cast_to_python_objects = True --> 182 out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True)) 183 # use smaller integer precisions if possible 184 if self.trying_int_optimization: /sfs/MNBVC/venv/lib64/python3.6/site-packages/pyarrow/array.pxi in pyarrow.lib.array() /sfs/MNBVC/venv/lib64/python3.6/site-packages/pyarrow/array.pxi in pyarrow.lib._sequence_to_array() /sfs/MNBVC/venv/lib64/python3.6/site-packages/pyarrow/error.pxi in pyarrow.lib.pyarrow_internal_check_status() OverflowError: Python int too large to convert to C long ``` However, that dataset can be loaded in a streaming manner: ```python from datasets import load_dataset dataset = load_dataset("liwu/MNBVC", 'news_peoples_daily', split='train', streaming=True) for i in dataset: pass # it work well ``` Another issue is reported in our dataset hub: https://huggingface.co/datasets/liwu/MNBVC/discussions/2 ### Steps to reproduce the bug from datasets import load_dataset dataset = load_dataset("liwu/MNBVC", 'news_peoples_daily', split='train') ### Expected behavior the dataset can be safely loaded ### Environment info - `datasets` version: 2.4.0 - Platform: Linux-3.10.0-1160.an7.x86_64-x86_64-with-centos-7.9 - Python version: 3.6.8 - PyArrow version: 6.0.1 - Pandas version: 1.1.5
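Distilling the discussion in the comments into a minimal reproducer (the value is taken from that thread):

```python
import pyarrow as pa

value = 18329103420363166823  # valid uint64, but above the int64 range

try:
    pa.array([value])  # type inference raises OverflowError on affected PyArrow versions
except OverflowError as err:
    print("inference failed:", err)

print(pa.array([value], type=pa.uint64()))  # an explicit uint64 type succeeds
```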
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6007/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6007/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/6006
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6006/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6006/comments
https://api.github.com/repos/huggingface/datasets/issues/6006/events
https://github.com/huggingface/datasets/issues/6006
1,788,855,582
I_kwDODunzps5qn8Ue
6,006
NotADirectoryError when loading gigawords
{ "login": "xipq", "id": 115634163, "node_id": "U_kgDOBuRv8w", "avatar_url": "https://avatars.githubusercontent.com/u/115634163?v=4", "gravatar_id": "", "url": "https://api.github.com/users/xipq", "html_url": "https://github.com/xipq", "followers_url": "https://api.github.com/users/xipq/followers", "following_url": "https://api.github.com/users/xipq/following{/other_user}", "gists_url": "https://api.github.com/users/xipq/gists{/gist_id}", "starred_url": "https://api.github.com/users/xipq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/xipq/subscriptions", "organizations_url": "https://api.github.com/users/xipq/orgs", "repos_url": "https://api.github.com/users/xipq/repos", "events_url": "https://api.github.com/users/xipq/events{/privacy}", "received_events_url": "https://api.github.com/users/xipq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "issue due to corrupted download files. resolved after cleaning download cache. sorry for any inconvinence." ]
2023-07-05T06:23:41
2023-07-05T06:31:02
2023-07-05T06:31:01
NONE
null
null
null
### Describe the bug got `NotADirectoryError` whtn loading gigawords dataset ### Steps to reproduce the bug When running ``` import datasets datasets.load_dataset('gigaword') ``` Got the following exception: ```bash Traceback (most recent call last): [0/1862] File "/home/x/.conda/envs/dataproc/lib/python3.8/site-packages/datasets/builder.py", line 1629, in _prepare_split_single for key, record in generator: File "/home/x/.cache/huggingface/modules/datasets_modules/datasets/gigaword/ea83a8b819190acac5f2dae011fad51dccf269a0604ec5dd24795b 64efb424b6/gigaword.py", line 115, in _generate_examples with open(src_path, encoding="utf-8") as f_d, open(tgt_path, encoding="utf-8") as f_s: File "/home/x/.conda/envs/dataproc/lib/python3.8/site-packages/datasets/streaming.py", line 71, in wrapper return function(*args, use_auth_token=use_auth_token, **kwargs) File "/home/x/.conda/envs/dataproc/lib/python3.8/site-packages/datasets/download/streaming_download_manager.py", line 493, in xope n return open(main_hop, mode, *args, **kwargs) NotADirectoryError: [Errno 20] Not a directory: '/home/x/.cache/huggingface/datasets/downloads/6da52431bb5124d90cf51a0187d2dbee9046e 89780c4be7599794a4f559048ec/org_data/train.src.txt' The above exception was the direct cause of the following exception: Traceback (most recent call last): File "gigaword.py", line 38, in <module> main() File "gigaword.py", line 35, in main train, dev, test = dataset.generate_k_shot_data(k=32, seed=seed, path="../data/") File "/home/x/MICL/preprocess/fewshot_gym_dataset.py", line 199, in generate_k_shot_data dataset = self.load_dataset() File "gigaword.py", line 29, in load_dataset return datasets.load_dataset('gigaword') File "/home/x/.conda/envs/dataproc/lib/python3.8/site-packages/datasets/load.py", line 1809, in load_dataset builder_instance.download_and_prepare( File "/home/x/.conda/envs/dataproc/lib/python3.8/site-packages/datasets/builder.py", line 909, in download_and_prepare self._download_and_prepare( File "/home/x/.conda/envs/dataproc/lib/python3.8/site-packages/datasets/builder.py", line 1670, in _download_and_prepare super()._download_and_prepare( File "/home/x/.conda/envs/dataproc/lib/python3.8/site-packages/datasets/builder.py", line 1004, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/home/x/.conda/envs/dataproc/lib/python3.8/site-packages/datasets/builder.py", line 1508, in _prepare_split for job_id, done, content in self._prepare_split_single( File "/home/x/.conda/envs/dataproc/lib/python3.8/site-packages/datasets/builder.py", line 1665, in _prepare_split_single raise DatasetGenerationError("An error occurred while generating the dataset") from e datasets.builder.DatasetGenerationError: An error occurred while generating the dataset ``` ### Expected behavior Download and process the dataset successfully ### Environment info - `datasets` version: 2.13.1 - Platform: Linux-5.0.0-1032-azure-x86_64-with-glibc2.10 - Python version: 3.8.0 - Huggingface_hub version: 0.15.1 - PyArrow version: 12.0.1 - Pandas version: 2.0.3
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6006/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6006/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/6003
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6003/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6003/comments
https://api.github.com/repos/huggingface/datasets/issues/6003/events
https://github.com/huggingface/datasets/issues/6003
1,786,554,110
I_kwDODunzps5qfKb-
6,003
interleave_datasets & DataCollatorForLanguageModeling having a conflict ?
{ "login": "PonteIneptique", "id": 1929830, "node_id": "MDQ6VXNlcjE5Mjk4MzA=", "avatar_url": "https://avatars.githubusercontent.com/u/1929830?v=4", "gravatar_id": "", "url": "https://api.github.com/users/PonteIneptique", "html_url": "https://github.com/PonteIneptique", "followers_url": "https://api.github.com/users/PonteIneptique/followers", "following_url": "https://api.github.com/users/PonteIneptique/following{/other_user}", "gists_url": "https://api.github.com/users/PonteIneptique/gists{/gist_id}", "starred_url": "https://api.github.com/users/PonteIneptique/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/PonteIneptique/subscriptions", "organizations_url": "https://api.github.com/users/PonteIneptique/orgs", "repos_url": "https://api.github.com/users/PonteIneptique/repos", "events_url": "https://api.github.com/users/PonteIneptique/events{/privacy}", "received_events_url": "https://api.github.com/users/PonteIneptique/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[]
2023-07-03T17:15:31
2023-07-03T17:15:31
null
NONE
null
null
null
### Describe the bug Hi everyone :) I have two local & custom datasets (1 "sentence" per line) which I split along the 95/5 lines for pre-training a Bert model. I use a modified version of `run_mlm.py` in order to be able to make use of `interleave_dataset`: - `tokenize()` runs fine - `group_text()` runs fine Everytime, on step 19, I get ```pytb File "env/lib/python3.9/site-packages/transformers/data/data_collator.py", line 779, in torch_mask_tokens inputs[indices_random] = random_words[indices_random] RuntimeError: Index put requires the source and destination dtypes match, got Float for the destination and Long for the source. ``` I tried: - training without interleave on dataset 1, it runs - training without interleave on dataset 2, it runs - training without `.to_iterable_dataset()`, it hangs then crash - training without group_text() and padding to max_length seemed to fix the issue, but who knows if this was just because it was an issue that would come much later in terms of steps. I might have coded something wrong, but I don't get what ### Steps to reproduce the bug I have this function: ```py def build_dataset(path: str, percent: str): dataset = load_dataset( "text", data_files={"train": [path]}, split=f"train[{percent}]" ) dataset = dataset.map( lambda examples: tokenize(examples["text"]), batched=True, num_proc=num_proc, ) dataset = dataset.map( group_texts, batched=True, num_proc=num_proc, desc=f"Grouping texts in chunks of {tokenizer.max_seq_length}", remove_columns=["text"] ) print(len(dataset)) return dataset.to_iterable_dataset() ``` I hardcoded group_text: ```py def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, and if the total_length < max_seq_length we exclude this batch and return an empty dict. # We could add padding if the model supported it instead of this drop, you can customize this part to your needs. total_length = (total_length // 512) * 512 # Split by chunks of max_len. result = { k: [t[i: i + 512] for i in range(0, total_length, 512)] for k, t in concatenated_examples.items() } # result = {k: [el for el in elements if el] for k, elements in result.items()} return result ``` And then I build datasets using the following code: ```py train1 = build_dataset("d1.txt", ":95%") train2 = build_dataset("d2.txt", ":95%") dev1 = build_dataset("d1.txt", "95%:") dev2 = build_dataset("d2.txt", "95%:") ``` and finally I run ```py train_dataset = interleave_datasets( [train1, train2], probabilities=[0.8, 0.2], seed=42 ) eval_dataset = interleave_datasets( [dev1, dev2], probabilities=[0.8, 0.2], seed=42 ) ``` Then I run the training part which remains mostly untouched: > CUDA_VISIBLE_DEVICES=1 python custom_dataset.py --model_type bert --per_device_train_batch_size 32 --do_train --output_dir /var/mlm/training-bert/model --max_seq_length 512 --save_steps 10000 --save_total_limit 3 --auto_find_batch_size --logging_dir ./logs-bert --learning_rate 0.0001 --do_train --num_train_epochs 25 --warmup_steps 10000 --max_step 45000 --fp16 ### Expected behavior The model should then train normally, but fails every time at the same step (19). printing the variables at `inputs[indices_random] = random_words[indices_random]` shows a magnificient empty tensor (, 32) [if I remember well] ### Environment info transformers[torch] 4.30.2 Ubuntu A100 0 CUDA 12 Driver Version: 525.116.04
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/6003/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/6003/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5999
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5999/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5999/comments
https://api.github.com/repos/huggingface/datasets/issues/5999/events
https://github.com/huggingface/datasets/issues/5999
1,781,851,513
I_kwDODunzps5qNOV5
5,999
Getting a 409 error while loading xglue dataset
{ "login": "Praful932", "id": 45713796, "node_id": "MDQ6VXNlcjQ1NzEzNzk2", "avatar_url": "https://avatars.githubusercontent.com/u/45713796?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Praful932", "html_url": "https://github.com/Praful932", "followers_url": "https://api.github.com/users/Praful932/followers", "following_url": "https://api.github.com/users/Praful932/following{/other_user}", "gists_url": "https://api.github.com/users/Praful932/gists{/gist_id}", "starred_url": "https://api.github.com/users/Praful932/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Praful932/subscriptions", "organizations_url": "https://api.github.com/users/Praful932/orgs", "repos_url": "https://api.github.com/users/Praful932/repos", "events_url": "https://api.github.com/users/Praful932/events{/privacy}", "received_events_url": "https://api.github.com/users/Praful932/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for reporting, @Praful932.\r\n\r\nLet's continue the conversation on the Hub: https://huggingface.co/datasets/xglue/discussions/5" ]
2023-06-30T04:13:54
2023-06-30T05:57:23
2023-06-30T05:57:22
NONE
null
null
null
### Describe the bug Unable to load xglue dataset ### Steps to reproduce the bug ```python import datasets dataset = datasets.load_dataset("xglue", "ntg") ``` > ConnectionError: Couldn't reach https://xglue.blob.core.windows.net/xglue/xglue_full_dataset.tar.gz (error 409) ### Expected behavior Expected the dataset to load ### Environment info - `datasets` version: 2.13.1 - Platform: Linux-5.15.107+-x86_64-with-glibc2.31 - Python version: 3.10.12 - Huggingface_hub version: 0.15.1 - PyArrow version: 9.0.0 - Pandas version: 1.5.3
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5999/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5999/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5998
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5998/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5998/comments
https://api.github.com/repos/huggingface/datasets/issues/5998/events
https://github.com/huggingface/datasets/issues/5998
1,781,805,018
I_kwDODunzps5qNC_a
5,998
The current implementation has a potential bug in the sort method
{ "login": "wangyuxinwhy", "id": 22192665, "node_id": "MDQ6VXNlcjIyMTkyNjY1", "avatar_url": "https://avatars.githubusercontent.com/u/22192665?v=4", "gravatar_id": "", "url": "https://api.github.com/users/wangyuxinwhy", "html_url": "https://github.com/wangyuxinwhy", "followers_url": "https://api.github.com/users/wangyuxinwhy/followers", "following_url": "https://api.github.com/users/wangyuxinwhy/following{/other_user}", "gists_url": "https://api.github.com/users/wangyuxinwhy/gists{/gist_id}", "starred_url": "https://api.github.com/users/wangyuxinwhy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/wangyuxinwhy/subscriptions", "organizations_url": "https://api.github.com/users/wangyuxinwhy/orgs", "repos_url": "https://api.github.com/users/wangyuxinwhy/repos", "events_url": "https://api.github.com/users/wangyuxinwhy/events{/privacy}", "received_events_url": "https://api.github.com/users/wangyuxinwhy/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Thanks for reporting, @wangyuxinwhy. " ]
2023-06-30T03:16:57
2023-06-30T14:21:03
2023-06-30T14:11:25
NONE
null
null
null
### Describe the bug In the sort method, here's a piece of code ```python # column_names: Union[str, Sequence_[str]] # Check proper format of and for duplicates in column_names if not isinstance(column_names, list): column_names = [column_names] ``` Based on the column_names type annotation, a tuple should be accepted, but passing one raises an error, as in the example below. ```python from datasets import load_dataset dataset = load_dataset('glue', 'ax')['test'] dataset.sort(column_names=('premise', 'hypothesis')) # Raise ValueError: Column '('premise', 'hypothesis')' not found in the dataset. ``` Of course, after I changed the tuple into a list, everything worked fine. Changing the code to the following would avoid the problem: ```python # Check proper format of and for duplicates in column_names if not isinstance(column_names, list): if isinstance(column_names, str): column_names = [column_names] else: column_names = list(column_names) ``` ### Steps to reproduce the bug ```python from datasets import load_dataset dataset = load_dataset('glue', 'ax')['test'] dataset.sort(column_names=('premise', 'hypothesis')) # Raise ValueError: Column '('premise', 'hypothesis')' not found in the dataset. ``` ### Expected behavior Passing a tuple into column_names should be equivalent to passing a list. ### Environment info - `datasets` version: 2.13.0 - Platform: macOS-13.1-arm64-arm-64bit - Python version: 3.10.11 - Huggingface_hub version: 0.15.1 - PyArrow version: 12.0.1 - Pandas version: 2.0.2
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5998/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5998/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5997
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5997/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5997/comments
https://api.github.com/repos/huggingface/datasets/issues/5997/events
https://github.com/huggingface/datasets/issues/5997
1,781,582,818
I_kwDODunzps5qMMvi
5,997
extend the map function so it can wrap around long text that does not fit in the context window
{ "login": "siddhsql", "id": 127623723, "node_id": "U_kgDOB5tiKw", "avatar_url": "https://avatars.githubusercontent.com/u/127623723?v=4", "gravatar_id": "", "url": "https://api.github.com/users/siddhsql", "html_url": "https://github.com/siddhsql", "followers_url": "https://api.github.com/users/siddhsql/followers", "following_url": "https://api.github.com/users/siddhsql/following{/other_user}", "gists_url": "https://api.github.com/users/siddhsql/gists{/gist_id}", "starred_url": "https://api.github.com/users/siddhsql/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/siddhsql/subscriptions", "organizations_url": "https://api.github.com/users/siddhsql/orgs", "repos_url": "https://api.github.com/users/siddhsql/repos", "events_url": "https://api.github.com/users/siddhsql/events{/privacy}", "received_events_url": "https://api.github.com/users/siddhsql/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[ "I just noticed the [docs](https://github.com/huggingface/datasets/blob/main/src/datasets/arrow_dataset.py#L2881C11-L2881C200) say:\r\n\r\n>If batched is `True` and `batch_size` is `n > 1`, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.\r\n\r\nso maybe this is a bug then.", "All the values in a batch must be of the same length. So one solution is dropping all the input columns:\r\n```python\r\ndata = data.map(lambda samples: tokenizer(samples[\"text\"], max_length=tokenizer.model_max_length, truncation=True, stride=4, return_overflowing_tokens=True), batched=True, remove_columns=data.column_names)\r\n```\r\n\r\nAnother is padding/transforming the input columns to the tokenizer output's length (447). " ]
2023-06-29T22:15:21
2023-07-03T17:58:52
null
NONE
null
null
null
### Feature request I understand `dataset` provides a [`map`](https://github.com/huggingface/datasets/blob/main/src/datasets/arrow_dataset.py#L2849) function. This function in turn takes in a callable that is used to tokenize the text on which a model is trained. Frequently this text will not fit within a models's context window. In this case it would be useful to wrap around the text into multiple rows with each row fitting the model's context window. I tried to do it using this code as example which in turn I have borrowed from [here](https://stackoverflow.com/a/76343993/147530): ``` data = data.map(lambda samples: tokenizer(samples["text"], max_length=tokenizer.model_max_length, truncation=True, stride=4, return_overflowing_tokens=True), batched=True) ``` but running the code gives me this error: ``` File "/llm/fine-tune.py", line 117, in <module> data = data.map(lambda samples: tokenizer(samples["text"], max_length=tokenizer.model_max_length, truncation=True, stride=4, return_overflowing_tokens=True), batched=True) File "/llm/.env/lib/python3.9/site-packages/datasets/arrow_dataset.py", line 580, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) File "/llm/.env/lib/python3.9/site-packages/datasets/arrow_dataset.py", line 545, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) File "/llm/.env/lib/python3.9/site-packages/datasets/arrow_dataset.py", line 3087, in map for rank, done, content in Dataset._map_single(**dataset_kwargs): File "/llm/.env/lib/python3.9/site-packages/datasets/arrow_dataset.py", line 3480, in _map_single writer.write_batch(batch) File "/llm/.env/lib/python3.9/site-packages/datasets/arrow_writer.py", line 556, in write_batch pa_table = pa.Table.from_arrays(arrays, schema=schema) File "pyarrow/table.pxi", line 3798, in pyarrow.lib.Table.from_arrays File "pyarrow/table.pxi", line 2962, in pyarrow.lib.Table.validate File "pyarrow/error.pxi", line 100, in pyarrow.lib.check_status pyarrow.lib.ArrowInvalid: Column 1 named input_ids expected length 394 but got length 447 ``` The lambda function I have provided is correctly chopping up long text so it wraps around (and because of this 394 samples become 447 after wrap around) but the dataset `map` function does not like it. ### Motivation please see above ### Your contribution I'm afraid I don't have much knowledge to help
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5997/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5997/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5993
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5993/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5993/comments
https://api.github.com/repos/huggingface/datasets/issues/5993/events
https://github.com/huggingface/datasets/issues/5993
1,776,643,555
I_kwDODunzps5p5W3j
5,993
ValueError: Table schema does not match schema used to create file
{ "login": "exs-avianello", "id": 128361578, "node_id": "U_kgDOB6akag", "avatar_url": "https://avatars.githubusercontent.com/u/128361578?v=4", "gravatar_id": "", "url": "https://api.github.com/users/exs-avianello", "html_url": "https://github.com/exs-avianello", "followers_url": "https://api.github.com/users/exs-avianello/followers", "following_url": "https://api.github.com/users/exs-avianello/following{/other_user}", "gists_url": "https://api.github.com/users/exs-avianello/gists{/gist_id}", "starred_url": "https://api.github.com/users/exs-avianello/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/exs-avianello/subscriptions", "organizations_url": "https://api.github.com/users/exs-avianello/orgs", "repos_url": "https://api.github.com/users/exs-avianello/repos", "events_url": "https://api.github.com/users/exs-avianello/events{/privacy}", "received_events_url": "https://api.github.com/users/exs-avianello/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
[ "We'll do a new release of `datasets` soon to make the fix available :)\r\n\r\nIn the meantime you can use `datasets` from source (main)", "Thank you very much @lhoestq ! 🚀 " ]
2023-06-27T10:54:07
2023-06-27T15:36:42
2023-06-27T15:32:44
NONE
null
null
null
### Describe the bug Saving a dataset as parquet fails with a `ValueError: Table schema does not match schema used to create file` if the dataset was obtained out of a `.select_columns()` call with columns selected out of order. ### Steps to reproduce the bug ```python import datasets dataset = datasets.Dataset.from_dict( { "x1": [1, 2, 3], "x2": [10, 11, 12], } ) ds = dataset.select_columns(["x2", "x1"]) ds.to_parquet("demo.parquet") ``` ```shell >>> ValueError: Table schema does not match schema used to create file: table: x2: int64 x1: int64 -- schema metadata -- huggingface: '{"info": {"features": {"x2": {"dtype": "int64", "_type": "V' + 53 vs. file: x1: int64 x2: int64 -- schema metadata -- huggingface: '{"info": {"features": {"x1": {"dtype": "int64", "_type": "V' + 53 ``` --- I think this is because after the `.select_columns()` call with out of order columns, the output dataset features' schema ends up being out of sync with the schema of the arrow table backing it. ```python ds.features.arrow_schema >>> x1: int64 x2: int64 -- schema metadata -- huggingface: '{"info": {"features": {"x1": {"dtype": "int64", "_type": "V' + 53 ds.data.schema >>> x2: int64 x1: int64 -- schema metadata -- huggingface: '{"info": {"features": {"x2": {"dtype": "int64", "_type": "V' + 53 ``` So when we call `.to_parquet()`, the call behind the scenes to `datasets.io.parquet.ParquetDatasetWriter(...).write()` which initialises the backend `pyarrow.parquet.ParquetWriter` with `schema = self.dataset.features.arrow_schema` triggers `pyarrow` on write when [it checks](https://github.com/apache/arrow/blob/11b140a734a516e436adaddaeb35d23f30dcce44/python/pyarrow/parquet/core.py#L1086-L1090) that the `ParquetWriter` schema matches the schema of the table being written 🙌 https://github.com/huggingface/datasets/blob/6ed837325cb539a5deb99129e5ad181d0269e050/src/datasets/io/parquet.py#L139-L141 ### Expected behavior The dataset gets successfully saved as parquet. *In the same way as it does if saving it as csv: ```python import datasets dataset = datasets.Dataset.from_dict( { "x1": [1, 2, 3], "x2": [10, 11, 12], } ) ds = dataset.select_columns(["x2", "x1"]) ds.to_csv("demo.csv") ``` ### Environment info `python==3.11` `datasets==2.13.1`
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5993/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5993/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5991
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5991/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5991/comments
https://api.github.com/repos/huggingface/datasets/issues/5991/events
https://github.com/huggingface/datasets/issues/5991
1,774,456,518
I_kwDODunzps5pxA7G
5,991
`map` with any joblib backend
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[]
2023-06-26T10:33:42
2023-06-26T10:33:42
null
MEMBER
null
null
null
We recently enabled the (experimental) parallel backend switch for data download and extraction but not for `map` yet. Right now we're using our `iflatmap_unordered` implementation for multiprocessing that uses a shared Queue to gather progress updates from the subprocesses and show a progress bar in the main process. If we had a Queue implementation that worked on any joblib backend by leveraging the filesystem that is shared among workers, we could have `iflatmap_unordered` for joblib and therefore a `map` with any joblib backend with a progress bar! Note that the Queue doesn't need to be that optimized though since we can choose a small frequency for progress updates (like 1 update per second).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5991/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5991/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5989
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5989/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5989/comments
https://api.github.com/repos/huggingface/datasets/issues/5989/events
https://github.com/huggingface/datasets/issues/5989
1,774,134,091
I_kwDODunzps5pvyNL
5,989
Set a rule on the config and split names
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[ "in this case we need to decide what to do with the existing datasets with white space characters (there shouldn't be a lot of them I think)", "I imagine that we should stop supporting them, and help the user fix them?", "See a report where the datasets server fails: https://huggingface.co/datasets/poloclub/diffusiondb/discussions/2#6374ff55b93cbdf65675f564\r\n\r\nThe config name is `random_10k [2m]`!" ]
2023-06-26T07:34:14
2023-07-19T14:22:54
null
CONTRIBUTOR
null
null
null
> should we actually allow characters like spaces? maybe it's better to add validation for whitespace symbols and directly in datasets and raise https://github.com/huggingface/datasets-server/issues/853
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5989/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5989/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5988
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5988/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5988/comments
https://api.github.com/repos/huggingface/datasets/issues/5988/events
https://github.com/huggingface/datasets/issues/5988
1,773,257,828
I_kwDODunzps5pscRk
5,988
ConnectionError: Couldn't reach dataset_infos.json
{ "login": "yulingao", "id": 20674868, "node_id": "MDQ6VXNlcjIwNjc0ODY4", "avatar_url": "https://avatars.githubusercontent.com/u/20674868?v=4", "gravatar_id": "", "url": "https://api.github.com/users/yulingao", "html_url": "https://github.com/yulingao", "followers_url": "https://api.github.com/users/yulingao/followers", "following_url": "https://api.github.com/users/yulingao/following{/other_user}", "gists_url": "https://api.github.com/users/yulingao/gists{/gist_id}", "starred_url": "https://api.github.com/users/yulingao/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yulingao/subscriptions", "organizations_url": "https://api.github.com/users/yulingao/orgs", "repos_url": "https://api.github.com/users/yulingao/repos", "events_url": "https://api.github.com/users/yulingao/events{/privacy}", "received_events_url": "https://api.github.com/users/yulingao/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Unfortunately, I can't reproduce the error. What does the following code return for you?\r\n```python\r\nimport requests\r\nfrom huggingface_hub import hf_hub_url\r\nr = requests.get(hf_hub_url(\"codeparrot/codeparrot-clean-train\", \"dataset_infos.json\", repo_type=\"dataset\"))\r\n```\r\n\r\nAlso, can you provide more info about your network (region, proxies, etc.)?" ]
2023-06-25T12:39:31
2023-07-07T13:20:57
2023-07-07T13:20:57
NONE
null
null
null
### Describe the bug I'm trying to load codeparrot/codeparrot-clean-train, but get the following error: ConnectionError: Couldn't reach https://huggingface.co/datasets/codeparrot/codeparrot-clean-train/resolve/main/dataset_infos.json (ConnectionError(ProtocolError('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer')))) ### Steps to reproduce the bug train_data = load_dataset('codeparrot/codeparrot-clean-train', split='train') ### Expected behavior download the dataset ### Environment info centos7
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5988/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5988/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5987
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5987/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5987/comments
https://api.github.com/repos/huggingface/datasets/issues/5987/events
https://github.com/huggingface/datasets/issues/5987
1,773,047,909
I_kwDODunzps5prpBl
5,987
Why max_shard_size is not supported in load_dataset and passed to download_and_prepare
{ "login": "npuichigo", "id": 11533479, "node_id": "MDQ6VXNlcjExNTMzNDc5", "avatar_url": "https://avatars.githubusercontent.com/u/11533479?v=4", "gravatar_id": "", "url": "https://api.github.com/users/npuichigo", "html_url": "https://github.com/npuichigo", "followers_url": "https://api.github.com/users/npuichigo/followers", "following_url": "https://api.github.com/users/npuichigo/following{/other_user}", "gists_url": "https://api.github.com/users/npuichigo/gists{/gist_id}", "starred_url": "https://api.github.com/users/npuichigo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/npuichigo/subscriptions", "organizations_url": "https://api.github.com/users/npuichigo/orgs", "repos_url": "https://api.github.com/users/npuichigo/repos", "events_url": "https://api.github.com/users/npuichigo/events{/privacy}", "received_events_url": "https://api.github.com/users/npuichigo/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Can you explain your use case for `max_shard_size`? \r\n\r\nOn some systems, there is a limit to the size of a memory-mapped file, so we could consider exposing this parameter in `load_dataset`.", "In my use case, users may choose a proper size to balance the cost and benefit of using large shard size. (On azure blob or hdfs which may automatically download the shard from background)", "But `load_dataset` doesn't support caching (and reading) Arrow datasets from remote storage. \r\n\r\n`load_datset_builder` + `download_and_prepare` is not equal to `load_dataset`. The latter has one more step, `builder.as_dataset`, that memory-maps Arrow files, which only works for local files.", "Thanks. So if I want to use `IterableDataset` and control the size of single arrow file, how should I organize the data loader? Maybe `load_dataset_build` + `download_and_prepare` + `builder.as_dataset` + `dataset.to_iterable_dataset`?", "Yes, this should work.\r\n\r\nI think we can expose `max_shard_size` in `load_dataset`, so feel free to open a PR." ]
2023-06-25T04:19:13
2023-06-29T16:06:08
2023-06-29T16:06:08
CONTRIBUTOR
null
null
null
### Describe the bug https://github.com/huggingface/datasets/blob/a8a797cc92e860c8d0df71e0aa826f4d2690713e/src/datasets/load.py#L1809 What I can do is break up the `load_dataset` call and use `load_dataset_builder` + `download_and_prepare` instead. ### Steps to reproduce the bug https://github.com/huggingface/datasets/blob/a8a797cc92e860c8d0df71e0aa826f4d2690713e/src/datasets/load.py#L1809 ### Expected behavior Users can define the max shard size. ### Environment info datasets==2.13.1
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5987/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5987/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5985
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5985/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5985/comments
https://api.github.com/repos/huggingface/datasets/issues/5985/events
https://github.com/huggingface/datasets/issues/5985
1,771,588,158
I_kwDODunzps5pmEo-
5,985
Cannot reuse tokenizer object for dataset map
{ "login": "vikigenius", "id": 12724810, "node_id": "MDQ6VXNlcjEyNzI0ODEw", "avatar_url": "https://avatars.githubusercontent.com/u/12724810?v=4", "gravatar_id": "", "url": "https://api.github.com/users/vikigenius", "html_url": "https://github.com/vikigenius", "followers_url": "https://api.github.com/users/vikigenius/followers", "following_url": "https://api.github.com/users/vikigenius/following{/other_user}", "gists_url": "https://api.github.com/users/vikigenius/gists{/gist_id}", "starred_url": "https://api.github.com/users/vikigenius/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/vikigenius/subscriptions", "organizations_url": "https://api.github.com/users/vikigenius/orgs", "repos_url": "https://api.github.com/users/vikigenius/repos", "events_url": "https://api.github.com/users/vikigenius/events{/privacy}", "received_events_url": "https://api.github.com/users/vikigenius/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892865, "node_id": "MDU6TGFiZWwxOTM1ODkyODY1", "url": "https://api.github.com/repos/huggingface/datasets/labels/duplicate", "name": "duplicate", "color": "cfd3d7", "default": true, "description": "This issue or pull request already exists" } ]
closed
false
null
[]
null
[ "This is a known issue: https://github.com/huggingface/datasets/issues/3847.\r\n\r\nFixing this requires significant work - rewriting the `tokenizers` lib to make them immutable.\r\n\r\nThe current solution is to pass `cache_file_name` to `map` to use that file for caching or calling a tokenizer before `map` (with the same set of parameters as the ones in the map transform)", "Closing since this is a duplicate" ]
2023-06-23T14:45:31
2023-07-21T14:09:14
2023-07-21T14:09:14
NONE
null
null
null
### Describe the bug Related to https://github.com/huggingface/transformers/issues/24441. Not sure if this is a tokenizer issue or caching issue, so filing in both. Passing the tokenizer to the dataset map function causes the tokenizer to be fingerprinted weirdly. After calling the tokenizer with arguments like padding and truncation the tokenizer object changes interanally, even though the hash remains the same. But dumps is able to detect that internal change which causes the tokenizer object's fingerprint to change. ### Steps to reproduce the bug ```python from transformers import AutoTokenizer from datasets.utils.py_utils import dumps # Huggingface datasets t = AutoTokenizer.from_pretrained('bert-base-uncased') t.save_pretrained("tok1") th1 = hash(dumps(t)) text = "This is an example text" ttext = t(text, max_length=512, padding="max_length", truncation=True) t.save_pretrained("tok2") th2 = hash(dumps(t)) assert th1 == th2 # Assertion Error ``` But if you use just the hash of the object without dumps, the hashes don't change ```python from transformers import AutoTokenizer from datasets.utils.py_utils import dumps # Huggingface datasets t = AutoTokenizer.from_pretrained('bert-base-uncased') th1 = hash(t) # Just hash no dumps text = "This is an example text" ttext = t(text, max_length=512, padding="max_length", truncation=True) th2 = hash(t) # Just hash no dumps assert th1 == th2 # This is OK ``` This causes situations such as the following 1. Create a text file like this `yes "This is an example text" | head -n 10000 > lines.txt` ```python from transformers import AutoTokenizer import datasets class TokenizeMapper(object): """Mapper for tokenizer. This is needed because the caching mechanism of HuggingFace does not work on lambdas. Each time a new lambda will be created by a new process which will lead to a different hash. This way we can have a universal mapper object in init and reuse it with the same hash for each process. """ def __init__(self, tokenizer): """Initialize the tokenizer.""" self.tokenizer = tokenizer def __call__(self, examples, **kwargs): """Run the mapper.""" texts = examples["text"] tt = self.tokenizer(texts, max_length=256, padding="max_length", truncation=True) batch_outputs = { "input_ids": tt.input_ids, "attention_mask": tt.attention_mask, } return batch_outputs t = AutoTokenizer.from_pretrained('bert-base-uncased') mapper = TokenizeMapper(t) ds = datasets.load_dataset("text", data_files="lines.txt") mds1 = ds.map( mapper, batched=False, remove_columns=["text"], ).with_format("torch") mds2 = ds.map( mapper, batched=False, remove_columns=["text"], ).with_format("torch") ``` The second call to map should reuse the cached processed dataset from mds1, but it instead it redoes the tokenization because of the behavior of dumps. ### Expected behavior We should be able to initialize a tokenizer. And reusing it should let us reuse the same map computation for the same dataset. The second call to map should reuse the cached processed dataset from mds1, but it instead it redoes the tokenization because of the behavior of dumps. ### Environment info - `datasets` version: 2.13.0 - Platform: Linux-6.1.31_1-x86_64-with-glibc2.36 - Python version: 3.9.16 - Huggingface_hub version: 0.15.1 - PyArrow version: 12.0.1 - Pandas version: 2.0.2
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5985/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5985/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5984
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5984/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5984/comments
https://api.github.com/repos/huggingface/datasets/issues/5984/events
https://github.com/huggingface/datasets/issues/5984
1,771,571,458
I_kwDODunzps5pmAkC
5,984
AutoSharding IterableDataset's when num_workers > 1
{ "login": "mathephysicist", "id": 25594384, "node_id": "MDQ6VXNlcjI1NTk0Mzg0", "avatar_url": "https://avatars.githubusercontent.com/u/25594384?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mathephysicist", "html_url": "https://github.com/mathephysicist", "followers_url": "https://api.github.com/users/mathephysicist/followers", "following_url": "https://api.github.com/users/mathephysicist/following{/other_user}", "gists_url": "https://api.github.com/users/mathephysicist/gists{/gist_id}", "starred_url": "https://api.github.com/users/mathephysicist/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mathephysicist/subscriptions", "organizations_url": "https://api.github.com/users/mathephysicist/orgs", "repos_url": "https://api.github.com/users/mathephysicist/repos", "events_url": "https://api.github.com/users/mathephysicist/events{/privacy}", "received_events_url": "https://api.github.com/users/mathephysicist/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[ "For this to be possible, we would have to switch from the \"Streaming\" Arrow format to the \"Random Access\" (IPC/Feather) format, which allows reading arbitrary record batches (explained [here](https://arrow.apache.org/docs/python/ipc.html)). We could then use these batches to construct shards.\r\n\r\n@lhoestq @albertvillanova Do you think this use case is worth the switch? Also, we currently shard files, not inner row groups/chunks. Should we also support sharding row groups (e.g. if the number of input files is 1)?\r\n\r\nPS: I don't expect significant speed-up for local, uncompressed Arrow files.", "Alternatively we could support multiprocessing map for iterable datasets and let the user do the CPU intensive task there ?\r\n\r\nThis way it would work on arrow data but also on any iterable dataset", "> For this to be possible, we would have to switch from the \"Streaming\" Arrow format to the \"Random Access\" (IPC/Feather) format, which allows reading arbitrary record batches (explained [here](https://arrow.apache.org/docs/python/ipc.html)). We could then use these batches to construct shards.\r\n> \r\n> @lhoestq @albertvillanova Do you think this use case is worth the switch? Also, we currently shard files, not inner row groups/chunks. Should we also support sharding row groups (e.g. if the number of input files is 1)?\r\n> \r\n> PS: I don't expect significant speed-up for local, uncompressed Arrow files.\r\n\r\nCould you explain why you'd need to change the arrow format?\r\n\r\nWhen we use streaming datasets we simply determine the number of worker shards and then add some modulo logic at the appropriate place. Worst case scenario, you'd skip streaming entries according to the number of shards.\r\n\r\nFor PyTorch, I'd be happy to provide an implementation or a sketch thereof, if you point me toward what the testing requirements would be for such a PR.", "> Could you explain why you'd need to change the arrow format?\r\n\r\nThis way workers have random access to the location of the file where its dataset subset starts. Currently we're using the Arrow streaming format which doesn't include the metadata of the record batches offsets. This is needed here to efficiently split a dataset made of one single file.", "> > Could you explain why you'd need to change the arrow format?\r\n> \r\n> This way workers have random access to the location of the file where its dataset subset starts. Currently we're using the Arrow streaming format which doesn't include the metadata of the record batches offsets. This is needed here to efficiently split a dataset made of one single file.\r\n\r\nI guess I don't understand why you'd need to subset the dataset in the first place. \r\nIt seems sufficient to figure out how to offset or skip rows.\r\n\r\nFor instance, using pyArrow, you could use RecordBatchStreamReader to zero-copy iterate over records with read_next_batch and then only initiate the next step for records modulo worker shard.\r\nThat's one way to do it, where of course you'd need to account for gpu sharding as well.\r\n\r\n\r\nOtherwise, how did you implement worker/node/GPU sharding for iterable/streaming data where you do not have index information or prior splits (e.g. files)?", "> For instance, using pyArrow, you could use RecordBatchStreamReader to zero-copy iterate over records with read_next_batch and then only initiate the next step for records modulo worker shard.\r\n\r\nThat works indeed ! And what we meant is that you can make it even faster to instantiate. 
Indeed using RecordBatchStreamReader you need to get the list of all the record batches in each worker, whereas you could just get the list of record batches per worker if you use the record batches locations in the Arrow IPC file footer. This would be especially appreciated to have a fast instantiation in case you have tens of thousands of Arrow files for example.", "Any recent updates on this ? " ]
2023-06-23T14:34:20
2023-12-08T09:04:04
null
NONE
null
null
null
### Feature request Minimal Example ``` import torch from datasets import IterableDataset d = IterableDataset.from_file(<file_name>) dl = torch.utils.data.dataloader.DataLoader(d,num_workers=3) for sample in dl: print(sample) ``` Warning: Too many dataloader workers: 2 (max is dataset.n_shards=1). Stopping 1 dataloader workers. To parallelize data loading, we give each process some shards (or data sources) to process. Therefore it's unnecessary to have a number of workers greater than dataset.n_shards=1. To enable more parallelism, please split the dataset in more files than 1. Expected Behavior: Dataset is sharded each cpu uses subset (contiguously - so you can do checkpoint loading/saving) ### Motivation I have a lot of unused cpu's and would like to be able to shard iterable datasets with pytorch's dataloader when num_workers > 1. This is for a very large single file. I am aware that we can use the `split_dataset_by_node` to ensure that each node (for distributed) gets different shards, but we should extend it so that this also continues for multiple workers. ### Your contribution If someone points me to what needs to change, I can create a PR.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5984/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5984/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5982
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5982/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5982/comments
https://api.github.com/repos/huggingface/datasets/issues/5982/events
https://github.com/huggingface/datasets/issues/5982
1,770,333,296
I_kwDODunzps5phSRw
5,982
404 on Datasets Documentation Page
{ "login": "kmulka-bloomberg", "id": 118509387, "node_id": "U_kgDOBxBPSw", "avatar_url": "https://avatars.githubusercontent.com/u/118509387?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kmulka-bloomberg", "html_url": "https://github.com/kmulka-bloomberg", "followers_url": "https://api.github.com/users/kmulka-bloomberg/followers", "following_url": "https://api.github.com/users/kmulka-bloomberg/following{/other_user}", "gists_url": "https://api.github.com/users/kmulka-bloomberg/gists{/gist_id}", "starred_url": "https://api.github.com/users/kmulka-bloomberg/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kmulka-bloomberg/subscriptions", "organizations_url": "https://api.github.com/users/kmulka-bloomberg/orgs", "repos_url": "https://api.github.com/users/kmulka-bloomberg/repos", "events_url": "https://api.github.com/users/kmulka-bloomberg/events{/privacy}", "received_events_url": "https://api.github.com/users/kmulka-bloomberg/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "This wasn’t working for me a bit earlier, but it looks to be back up now", "We had a minor issue updating the docs after the latest release. It should work now :)." ]
2023-06-22T20:14:57
2023-06-26T15:45:03
2023-06-26T15:45:03
NONE
null
null
null
### Describe the bug Getting a 404 from the Hugging Face Datasets docs page: https://huggingface.co/docs/datasets/index ### Steps to reproduce the bug 1. Go to URL https://huggingface.co/docs/datasets/index 2. Notice 404 not found ### Expected behavior URL should either show docs or redirect to new location ### Environment info huggingface.co
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5982/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5982/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5981
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5981/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5981/comments
https://api.github.com/repos/huggingface/datasets/issues/5981/events
https://github.com/huggingface/datasets/issues/5981
1,770,310,087
I_kwDODunzps5phMnH
5,981
Only two cores are getting used in sagemaker with pytorch 3.10 kernel
{ "login": "mmr-crexi", "id": 107141022, "node_id": "U_kgDOBmLXng", "avatar_url": "https://avatars.githubusercontent.com/u/107141022?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mmr-crexi", "html_url": "https://github.com/mmr-crexi", "followers_url": "https://api.github.com/users/mmr-crexi/followers", "following_url": "https://api.github.com/users/mmr-crexi/following{/other_user}", "gists_url": "https://api.github.com/users/mmr-crexi/gists{/gist_id}", "starred_url": "https://api.github.com/users/mmr-crexi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mmr-crexi/subscriptions", "organizations_url": "https://api.github.com/users/mmr-crexi/orgs", "repos_url": "https://api.github.com/users/mmr-crexi/repos", "events_url": "https://api.github.com/users/mmr-crexi/events{/privacy}", "received_events_url": "https://api.github.com/users/mmr-crexi/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "I think it's more likely that this issue is related to PyTorch than Datasets, as PyTorch (on import) registers functions to execute when forking a process. Maybe this is the culprit: https://github.com/pytorch/pytorch/issues/99625", "From reading that ticket, it may be down in mkl? Is it worth hotfixing in the meantime, with the express intention of turning it off? I know that's a horribly crufty solution, but it's also deeply frustrating to be limited to 2 cores for operations as simple as filtration.", "This is too specific and unrelated to `datasets`, so this shouldn't be fixed here.", "@mariosasko @mmr-crexi I had the exact same problem on my kubernetes cluster. the datasets subprocess only user 1 and 17 core" ]
2023-06-22T19:57:31
2023-10-30T06:17:40
2023-07-24T11:54:52
NONE
null
null
null
### Describe the bug When using the newer PyTorch 3.10 kernel, only 2 cores are being used by Hugging Face `filter` and `map` functions. The PyTorch 3.9 kernel would use as many cores as specified in the num_proc argument. We have solved this in our own code by placing the following snippet in the code that is called inside subprocesses: ```os.sched_setaffinity(0, {i for i in range(1000)})``` The problem, as near as we can tell, is that once upon a time, CPU affinity was set using a bitmask ("0xfffff" and the like), and affinity recently changed to a list of processors rather than using the mask. As such, only processors 1 and 17 are shown to be working in htop. ![Selection_072](https://github.com/huggingface/datasets/assets/107141022/04c5a824-5321-4531-afca-7bc84dff36b4) When running functions via `map`, the above resetting of affinity works to spread the work across the cores. When using `filter`, however, only two cores are active. ### Steps to reproduce the bug Repro steps: 1. Create an AWS SageMaker instance 2. Use the PyTorch 3.10 (pytorch_p310) kernel 3. Load a dataset 4. Run a `filter` operation 5. Watch as only 2 cores are used when num_proc > 2 6. Run a `map` operation 7. Watch as only 2 cores are used when num_proc > 2 8. Run a `map` operation with the processor affinity reset inside the function called via `map` 9. Watch as all cores run ### Expected behavior All specified cores are used via the num_proc argument. ### Environment info AWS SageMaker with the following init script run in the terminal after instance creation: conda init bash bash conda activate pytorch_p310 pip install Wand PyPDF pytesseract datasets seqeval pdfplumber transformers pymupdf sentencepiece timm donut-python accelerate optimum xgboost python -m pip install 'git+https://github.com/facebookresearch/detectron2.git' sudo yum -y install htop sudo yum -y update sudo yum -y install wget libstdc++ autoconf automake libtool autoconf-archive pkg-config gcc gcc-c++ make libjpeg-devel libpng-devel libtiff-devel zlib-devel
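As a hedged illustration of the workaround described above (not a fix for the underlying kernel issue), the affinity reset can be placed inside the function handed to `map`; the dataset name and column are placeholders:

```python
# Illustrative only: reset the inherited CPU affinity inside each worker process,
# mirroring the os.sched_setaffinity snippet from the report.
import os
from datasets import load_dataset

def process(batch):
    os.sched_setaffinity(0, {i for i in range(1000)})    # same mask as in the report
    batch["text"] = [t.lower() for t in batch["text"]]   # placeholder work
    return batch

ds = load_dataset("imdb", split="train")  # placeholder dataset with a "text" column
ds = ds.map(process, batched=True, num_proc=8)
```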
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5981/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5981/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5980
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5980/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5980/comments
https://api.github.com/repos/huggingface/datasets/issues/5980/events
https://github.com/huggingface/datasets/issues/5980
1,770,255,973
I_kwDODunzps5pg_Zl
5,980
Viewing dataset card returns “502 Bad Gateway”
{ "login": "tbenthompson", "id": 4241811, "node_id": "MDQ6VXNlcjQyNDE4MTE=", "avatar_url": "https://avatars.githubusercontent.com/u/4241811?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tbenthompson", "html_url": "https://github.com/tbenthompson", "followers_url": "https://api.github.com/users/tbenthompson/followers", "following_url": "https://api.github.com/users/tbenthompson/following{/other_user}", "gists_url": "https://api.github.com/users/tbenthompson/gists{/gist_id}", "starred_url": "https://api.github.com/users/tbenthompson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tbenthompson/subscriptions", "organizations_url": "https://api.github.com/users/tbenthompson/orgs", "repos_url": "https://api.github.com/users/tbenthompson/repos", "events_url": "https://api.github.com/users/tbenthompson/events{/privacy}", "received_events_url": "https://api.github.com/users/tbenthompson/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Can you try again? Maybe there was a minor outage.", "Yes, it seems to be working now. In case it's helpful, the outage lasted several days. It was failing as late as yesterday morning. ", "we fixed something on the server side, glad it's fixed now" ]
2023-06-22T19:14:48
2023-06-27T08:38:19
2023-06-26T14:42:45
NONE
null
null
null
The url is: https://huggingface.co/datasets/Confirm-Labs/pile_ngrams_trigrams I am able to successfully view the “Files and versions” tab: [Confirm-Labs/pile_ngrams_trigrams at main](https://huggingface.co/datasets/Confirm-Labs/pile_ngrams_trigrams/tree/main) Any help would be appreciated! Thanks! I hope this is the right place to report an issue like this.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5980/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5980/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5975
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5975/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5975/comments
https://api.github.com/repos/huggingface/datasets/issues/5975/events
https://github.com/huggingface/datasets/issues/5975
1,768,271,343
I_kwDODunzps5pZa3v
5,975
Streaming Dataset behind Proxy - FileNotFoundError
{ "login": "Veluchs", "id": 135350576, "node_id": "U_kgDOCBFJMA", "avatar_url": "https://avatars.githubusercontent.com/u/135350576?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Veluchs", "html_url": "https://github.com/Veluchs", "followers_url": "https://api.github.com/users/Veluchs/followers", "following_url": "https://api.github.com/users/Veluchs/following{/other_user}", "gists_url": "https://api.github.com/users/Veluchs/gists{/gist_id}", "starred_url": "https://api.github.com/users/Veluchs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Veluchs/subscriptions", "organizations_url": "https://api.github.com/users/Veluchs/orgs", "repos_url": "https://api.github.com/users/Veluchs/repos", "events_url": "https://api.github.com/users/Veluchs/events{/privacy}", "received_events_url": "https://api.github.com/users/Veluchs/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Duplicate of #", "Hi ! can you try to set the upper case environment variables `HTTP_PROXY` and `HTTPS_PROXY` ?\r\n\r\nWe use `aiohttp` for streaming and it uses case sensitive environment variables", "Hi, thanks for the quick reply.\r\n\r\nI set the uppercase env variables with\r\n\r\n`\r\nos.environ['HTTP_PROXY'] = \"http://example.com:xxxx\" \r\nos.environ['HTTPS_PROXY'] = \"http://example.com:xxxx\" \r\n`\r\n\r\nHowever, I still get the same error.\r\n\r\nOne thing that could be helpfull: When downloading a dataset without streaming i get the following message:\r\n_HF google storage unreachable. Downloading and preparing it from source_.\r\nThe download does however work as expected.\r\n", "Are you able to use `aiohttp` to get the file at `https://huggingface.co/datasets/facebook/voxpopuli/resolve/main/data/n_files.json` using your proxy ?", "It only works when passing trust_env=True when creating the ClientSession, as well as setting ssl=False.\r\n\r\nWorking Example:\r\n\r\n```\r\nimport os\r\n\r\nos.environ['HTTP_PROXY'] = \"xyz\"\r\nos.environ['HTTPS_PROXY'] = \"xyz\"\r\n\r\nimport asyncio\r\nimport aiohttp\r\n\r\nasync def download_pep(url):\r\n async with aiohttp.ClientSession(trust_env=True) as session:\r\n print(\"1\")\r\n async with session.get(url, ssl=False) as resp:\r\n print(\"2\")\r\n content = await resp.text()\r\n print(content)\r\n return content\r\n\r\nasyncio.run(download_pep(\"https://huggingface.co/datasets/facebook/voxpopuli/resolve/main/data/n_files.json\"))\r\n```\r\n\r\n\r\n\r\nSSL Verification has been a problem with other packages as well. Usually I circumvent the problem by setting\r\n```\r\nimport ssl\r\nssl._create_default_https_context = ssl._create_unverified_context\r\n```\r\n(probably not the best idea for security), although here aiohttp does not seem to use this default context.", "We do pass `trust_env` as well. Could you share the full stack trace you get when streaming using `datasets` ? That could help locate where we might have forgotten to pass `trust_env`", "Is there a way to disable ssl verification when streaming a dataset. 
I suspect this might be the isssue with my proxy.\r\n\r\n\r\nHere you go:\r\n\r\n```\r\nFileNotFoundError Traceback (most recent call last)\r\nCell In[8], line 3\r\n 1 from datasets import load_dataset\r\n----> 3 ds = load_dataset(\"facebook/voxpopuli\", name=\"de\", streaming=True)\r\n 5 sample = next(iter(ds))\r\n\r\nFile [~/.conda/envs/audio_hf/lib/python3.10/site-packages/datasets/load.py:1790](https://vscode-remote+ssh-002dremote-002bml-002er-002dsoftware-002eat.vscode-resource.vscode-cdn.net/home/wrsbri/projects/audio_course/~/.conda/envs/audio_hf/lib/python3.10/site-packages/datasets/load.py:1790), in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, num_proc, storage_options, **config_kwargs)\r\n 1788 # Return iterable dataset in case of streaming\r\n 1789 if streaming:\r\n-> 1790 return builder_instance.as_streaming_dataset(split=split)\r\n 1792 # Some datasets are already processed on the HF google storage\r\n 1793 # Don't try downloading from Google storage for the packaged datasets as text, json, csv or pandas\r\n 1794 try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES\r\n\r\nFile [~/.conda/envs/audio_hf/lib/python3.10/site-packages/datasets/builder.py:1281](https://vscode-remote+ssh-002dremote-002bml-002er-002dsoftware-002eat.vscode-resource.vscode-cdn.net/home/wrsbri/projects/audio_course/~/.conda/envs/audio_hf/lib/python3.10/site-packages/datasets/builder.py:1281), in DatasetBuilder.as_streaming_dataset(self, split, base_path)\r\n 1274 dl_manager = StreamingDownloadManager(\r\n 1275 base_path=base_path or self.base_path,\r\n 1276 download_config=DownloadConfig(use_auth_token=self.use_auth_token, storage_options=self.storage_options),\r\n 1277 dataset_name=self.name,\r\n 1278 data_dir=self.config.data_dir,\r\n 1279 )\r\n 1280 self._check_manual_download(dl_manager)\r\n-> 1281 splits_generators = {sg.name: sg for sg in self._split_generators(dl_manager)}\r\n 1282 # By default, return all splits\r\n 1283 if split is None:\r\n\r\nFile [~/.cache/huggingface/modules/datasets_modules/datasets/facebook--voxpopuli/b5ff837284f0778eefe0f642734e142d8c3f574eba8c9c8a4b13602297f73604/voxpopuli.py:120](https://vscode-remote+ssh-002dremote-002bml-002er-002dsoftware-002eat.vscode-resource.vscode-cdn.net/home/wrsbri/projects/audio_course/~/.cache/huggingface/modules/datasets_modules/datasets/facebook--voxpopuli/b5ff837284f0778eefe0f642734e142d8c3f574eba8c9c8a4b13602297f73604/voxpopuli.py:120), in Voxpopuli._split_generators(self, dl_manager)\r\n 118 def _split_generators(self, dl_manager):\r\n 119 n_shards_path = dl_manager.download_and_extract(_N_SHARDS_FILE)\r\n--> 120 with open(n_shards_path) as f:\r\n 121 n_shards = json.load(f)\r\n 123 if self.config.name == \"en_accented\":\r\n\r\nFile [~/.conda/envs/audio_hf/lib/python3.10/site-packages/datasets/streaming.py:71](https://vscode-remote+ssh-002dremote-002bml-002er-002dsoftware-002eat.vscode-resource.vscode-cdn.net/home/wrsbri/projects/audio_course/~/.conda/envs/audio_hf/lib/python3.10/site-packages/datasets/streaming.py:71), in extend_module_for_streaming..wrap_auth..wrapper(*args, **kwargs)\r\n 69 @wraps(function)\r\n 70 def wrapper(*args, **kwargs):\r\n---> 71 return function(*args, use_auth_token=use_auth_token, **kwargs)\r\n\r\nFile 
[~/.conda/envs/audio_hf/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py:517](https://vscode-remote+ssh-002dremote-002bml-002er-002dsoftware-002eat.vscode-resource.vscode-cdn.net/home/wrsbri/projects/audio_course/~/.conda/envs/audio_hf/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py:517), in xopen(file, mode, use_auth_token, *args, **kwargs)\r\n 515 except FileNotFoundError:\r\n 516 if file.startswith(config.HF_ENDPOINT):\r\n--> 517 raise FileNotFoundError(\r\n 518 file + \"\\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`.\"\r\n 519 ) from None\r\n 520 else:\r\n 521 raise\r\n\r\nFileNotFoundError: https://huggingface.co/datasets/facebook/voxpopuli/resolve/main/data/n_files.json\r\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`.\r\n```", "> Is there a way to disable ssl verification when streaming a dataset.\r\n\r\nI don't think so.\r\n\r\nWe use `fsspec` HTTPFileSystem implementation that is based on `aiohttp`. If you register a subclass of HTTPFileSystem that has SSL disabled by default it could work, but I wouldn't recommended it because it can raise security issues.", "Okay thanks for your help! I guess I have to figure out how to improve the proxy environment / see if I can make it work with ssl connections." ]
2023-06-21T19:10:02
2023-06-30T05:55:39
2023-06-30T05:55:38
NONE
null
null
null
### Describe the bug When trying to stream a dataset I get the following error after a few minutes of waiting. ``` FileNotFoundError: https://huggingface.co/datasets/facebook/voxpopuli/resolve/main/data/n_files.json If the repo is private or gated, make sure to log in with `huggingface-cli login`. ``` I have already set the proxy environment variables. Downloading a dataset without streaming works as expected. Still, I suspect that this is connected to being behind a proxy. Is there a way to set the proxy for streaming datasets? Possibly a keyword argument that gets passed to fsspec? ### Steps to reproduce the bug This is the code I use. ``` import os os.environ['http_proxy'] = "http://example.com:xxxx" os.environ['https_proxy'] = "http://example.com:xxxx" from datasets import load_dataset ds = load_dataset("facebook/voxpopuli", name="de", streaming=True) ``` ### Expected behavior I would expect the streaming functionality to use the configured proxy settings. ### Environment info - `datasets` version: 2.13.0 - Platform: Linux-5.15.0-73-generic-x86_64-with-glibc2.35 - Python version: 3.10.11 - Huggingface_hub version: 0.15.1 - PyArrow version: 11.0.0 - Pandas version: 2.0.2
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5975/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5975/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5971
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5971/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5971/comments
https://api.github.com/repos/huggingface/datasets/issues/5971/events
https://github.com/huggingface/datasets/issues/5971
1,767,053,635
I_kwDODunzps5pUxlD
5,971
Docs: make "repository structure" easier to find
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892861, "node_id": "MDU6TGFiZWwxOTM1ODkyODYx", "url": "https://api.github.com/repos/huggingface/datasets/labels/documentation", "name": "documentation", "color": "0075ca", "default": true, "description": "Improvements or additions to documentation" } ]
open
false
{ "login": "benjaminbrown038", "id": 35114142, "node_id": "MDQ6VXNlcjM1MTE0MTQy", "avatar_url": "https://avatars.githubusercontent.com/u/35114142?v=4", "gravatar_id": "", "url": "https://api.github.com/users/benjaminbrown038", "html_url": "https://github.com/benjaminbrown038", "followers_url": "https://api.github.com/users/benjaminbrown038/followers", "following_url": "https://api.github.com/users/benjaminbrown038/following{/other_user}", "gists_url": "https://api.github.com/users/benjaminbrown038/gists{/gist_id}", "starred_url": "https://api.github.com/users/benjaminbrown038/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/benjaminbrown038/subscriptions", "organizations_url": "https://api.github.com/users/benjaminbrown038/orgs", "repos_url": "https://api.github.com/users/benjaminbrown038/repos", "events_url": "https://api.github.com/users/benjaminbrown038/events{/privacy}", "received_events_url": "https://api.github.com/users/benjaminbrown038/received_events", "type": "User", "site_admin": false }
[ { "login": "benjaminbrown038", "id": 35114142, "node_id": "MDQ6VXNlcjM1MTE0MTQy", "avatar_url": "https://avatars.githubusercontent.com/u/35114142?v=4", "gravatar_id": "", "url": "https://api.github.com/users/benjaminbrown038", "html_url": "https://github.com/benjaminbrown038", "followers_url": "https://api.github.com/users/benjaminbrown038/followers", "following_url": "https://api.github.com/users/benjaminbrown038/following{/other_user}", "gists_url": "https://api.github.com/users/benjaminbrown038/gists{/gist_id}", "starred_url": "https://api.github.com/users/benjaminbrown038/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/benjaminbrown038/subscriptions", "organizations_url": "https://api.github.com/users/benjaminbrown038/orgs", "repos_url": "https://api.github.com/users/benjaminbrown038/repos", "events_url": "https://api.github.com/users/benjaminbrown038/events{/privacy}", "received_events_url": "https://api.github.com/users/benjaminbrown038/received_events", "type": "User", "site_admin": false } ]
null
[ "Loading a local dataset also works the same way when `data_files` are not specified, so I agree we should make this info easier to discover \r\n\r\ncc @stevhliu ", "Is this issue open? If so, I will self assign. ", "@benjaminbrown038 Yes, it is. Maybe @stevhliu can give some pointers on improving this doc page's discoverability.", "I think we can add a version of the [Main use-case](https://huggingface.co/docs/datasets/repository_structure#main-usecase) section to the [Share a dataset to the Hub](https://huggingface.co/docs/datasets/upload_dataset) tutorial. \r\n\r\nCurrently, it doesn't tell you *how* to structure the repository; it only tells you how to create it. So adding the \"main use-case\" will help bridge the gap and make it easier to find. We should also add a link to the [Structure your repository](https://huggingface.co/docs/datasets/repository_structure) guide for users who want to learn about the other options.", "#self-assign" ]
2023-06-21T08:26:44
2023-07-05T06:51:38
null
CONTRIBUTOR
null
null
null
The page https://huggingface.co/docs/datasets/repository_structure explains how to create a simple repository structure without a dataset script. It's the simplest way to create a dataset and should be easier to find, particularly on the docs' first pages.
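For context, a rough sketch of the "main use-case" that page documents: a no-script layout (file and repo names illustrative) that `load_dataset` can read directly.

```python
# Assumed layout (illustrative) that needs no dataset script:
#
#   my_dataset_repository/
#   ├── README.md
#   ├── train.csv
#   └── test.csv
#
from datasets import load_dataset

ds = load_dataset("username/my_dataset_repository")  # placeholder repo id
print(ds)  # "train" and "test" splits inferred from the file names
```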
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5971/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5971/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5970
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5970/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5970/comments
https://api.github.com/repos/huggingface/datasets/issues/5970/events
https://github.com/huggingface/datasets/issues/5970
1,766,010,356
I_kwDODunzps5pQy30
5,970
description disappearing from Info when Uploading a Dataset Created with `from_dict`
{ "login": "balisujohn", "id": 20377292, "node_id": "MDQ6VXNlcjIwMzc3Mjky", "avatar_url": "https://avatars.githubusercontent.com/u/20377292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/balisujohn", "html_url": "https://github.com/balisujohn", "followers_url": "https://api.github.com/users/balisujohn/followers", "following_url": "https://api.github.com/users/balisujohn/following{/other_user}", "gists_url": "https://api.github.com/users/balisujohn/gists{/gist_id}", "starred_url": "https://api.github.com/users/balisujohn/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/balisujohn/subscriptions", "organizations_url": "https://api.github.com/users/balisujohn/orgs", "repos_url": "https://api.github.com/users/balisujohn/repos", "events_url": "https://api.github.com/users/balisujohn/events{/privacy}", "received_events_url": "https://api.github.com/users/balisujohn/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[ "Here's a minimal way to reproduce the bug, for the sake of convenience.\r\n````\r\nfrom datasets import Dataset, DatasetInfo, load_dataset\r\n\r\n\r\nepisodes_dict = {\"test\":[1,2,3],\"test2\": [1,2,4]}\r\n\r\nhugging_face_dataset = Dataset.from_dict(\r\n episodes_dict, info=DatasetInfo(description=\"test_str\")\r\n)\r\nprint(hugging_face_dataset.info)\r\n\r\nhugging_face_dataset.push_to_hub(\"balisujohn/minari_test\", private=True)\r\n\r\nredownloaded_dataset= load_dataset(\"balisujohn/minari_test\")[\"train\"]\r\n\r\n\r\nprint(redownloaded_dataset.info)\r\n````\r\n", "Thanks for reporting !\r\n\r\nFor now I would recommend uploading a separate JSON file for your metadata.\r\n\r\nAlternatively you can upload a second configuration of the dataset containing your metadata but this feature is not released yet (though you can already use it from [here](https://github.com/huggingface/datasets/pull/5331), it will be released soon)" ]
2023-06-20T19:18:26
2023-06-22T14:23:56
null
NONE
null
null
null
### Describe the bug When uploading a dataset created locally using `from_dict` with a specified `description` field. It appears before upload, but is missing after upload and re-download. ### Steps to reproduce the bug I think the most relevant pattern in the code might be the following lines: ``` description_json_str = json.dumps( { "dataset_id": dataset.spec.dataset_id, "env_name": dataset.spec.env_spec.id, "action_space": serialize_space(dataset.spec.action_space), "observation_space": serialize_space(dataset.spec.observation_space), } ) hugging_face_dataset = Dataset.from_dict( episodes_dict, info=DatasetInfo(description=description_json_str) ) ``` Which comes from this function https://github.com/balisujohn/minarai/blob/8e023727f0a8488c4451651d9f7a79b981412c40/minari/integrations/hugging_face.py#L39 To replicate, clone this branch of my Minari fork https://github.com/balisujohn/minarai/tree/dev-huggingface then run ``` python3.8 -m venv env source env/bin/activate python3 -m pip install -e . python3 -m pip install pytest ``` The change the hugging face repo path in the test called `test_hugging_face_push_and_pull_dataset` in `tests/integrations/test_hugging_face.py` to one you have permissions to write to. Then run: ``` pytest tests/integrations/test_hugging_face.py::test_hugging_face_push_and_pull_dataset ``` ### Expected behavior DATASET INFO BEFORE UPLOADING DatasetInfo(description='{"dataset_id": "dummy-combo-test-v0", "env_name": "DummyComboEnv-v0", "action_space": "{\\"type\\": \\"Tuple\\", \\"subspaces\\": [{\\"type\\": \\"Box\\", \\"dtype\\": \\"float32\\", \\"shape\\": [1], \\"low\\": [2.0], \\"high\\": [3.0]}, {\\"type\\": \\"Box\\", \\"dtype\\": \\"float32\\", \\"shape\\": [1], \\"low\\": [4.0], \\"high\\": [5.0]}]}", "observation_space": "{\\"type\\": \\"Tuple\\", \\"subspaces\\": [{\\"type\\": \\"Box\\", \\"dtype\\": \\"float32\\", \\"shape\\": [1], \\"low\\": [2.0], \\"high\\": [3.0]}, {\\"type\\": \\"Tuple\\", \\"subspaces\\": [{\\"type\\": \\"Box\\", \\"dtype\\": \\"float32\\", \\"shape\\": [1], \\"low\\": [2.0], \\"high\\": [3.0]}, {\\"type\\": \\"Dict\\", \\"subspaces\\": {\\"component_1\\": {\\"type\\": \\"Box\\", \\"dtype\\": \\"float32\\", \\"shape\\": [1], \\"low\\": [-1.0], \\"high\\": [1.0]}, \\"component_2\\": {\\"type\\": \\"Dict\\", \\"subspaces\\": {\\"subcomponent_1\\": {\\"type\\": \\"Box\\", \\"dtype\\": \\"float32\\", \\"shape\\": [1], \\"low\\": [2.0], \\"high\\": [3.0]}, \\"subcomponent_2\\": {\\"type\\": \\"Tuple\\", \\"subspaces\\": [{\\"type\\": \\"Box\\", \\"dtype\\": \\"float32\\", \\"shape\\": [1], \\"low\\": [4.0], \\"high\\": [5.0]}, {\\"type\\": \\"Discrete\\", \\"dtype\\": \\"int64\\", \\"start\\": 0, \\"n\\": 10}]}}}}}]}]}"}', citation='', homepage='', license='', features={'observations': {'_index_0': Sequence(feature=Value(dtype='float32', id=None), length=-1, id=None), '_index_1': {'_index_0': Sequence(feature=Value(dtype='float32', id=None), length=-1, id=None), '_index_1': {'component_1': Sequence(feature=Value(dtype='float32', id=None), length=-1, id=None), 'component_2': {'subcomponent_1': Sequence(feature=Value(dtype='float32', id=None), length=-1, id=None), 'subcomponent_2': {'_index_0': Sequence(feature=Value(dtype='float32', id=None), length=-1, id=None), '_index_1': Value(dtype='int64', id=None)}}}}}, 'actions': {'_index_0': Sequence(feature=Value(dtype='float32', id=None), length=-1, id=None), '_index_1': Sequence(feature=Value(dtype='float32', id=None), length=-1, id=None)}, 'rewards': Value(dtype='int64', id=None), 
'truncations': Value(dtype='bool', id=None), 'terminations': Value(dtype='bool', id=None), 'episode_ids': Value(dtype='int64', id=None)}, post_processed=None, supervised_keys=None, task_templates=None, builder_name=None, config_name=None, version=None, splits=None, download_checksums=None, download_size=None, post_processing_size=None, dataset_size=None, size_in_bytes=None) ... DATASET INFO AFTER UPLOADING AND DOWNLOADING DatasetInfo(description='', citation='', homepage='', license='', features={'observations': {'_index_0': Sequence(feature=Value(dtype='float32', id=None), length=-1, id=None), '_index_1': {'_index_0': Sequence(feature=Value(dtype='float32', id=None), length=-1, id=None), '_index_1': {'component_1': Sequence(feature=Value(dtype='float32', id=None), length=-1, id=None), 'component_2': {'subcomponent_1': Sequence(feature=Value(dtype='float32', id=None), length=-1, id=None), 'subcomponent_2': {'_index_0': Sequence(feature=Value(dtype='float32', id=None), length=-1, id=None), '_index_1': Value(dtype='int64', id=None)}}}}}, 'actions': {'_index_0': Sequence(feature=Value(dtype='float32', id=None), length=-1, id=None), '_index_1': Sequence(feature=Value(dtype='float32', id=None), length=-1, id=None)}, 'rewards': Value(dtype='int64', id=None), 'truncations': Value(dtype='bool', id=None), 'terminations': Value(dtype='bool', id=None), 'episode_ids': Value(dtype='int64', id=None)}, post_processed=None, supervised_keys=None, task_templates=None, builder_name=None, config_name=None, version=None, splits={'train': SplitInfo(name='train', num_bytes=4846, num_examples=60, shard_lengths=None, dataset_name='parquet')}, download_checksums={'https://huggingface.co/datasets/balisujohn/minari_test/resolve/8217b614ff9ba5edc1a30c7df430e92a46f65363/data/train-00000-of-00001-7c5900b93b35745e.parquet': {'num_bytes': 9052, 'checksum': None}}, download_size=9052, post_processing_size=None, dataset_size=4846, size_in_bytes=13898) ... ### Environment info - `datasets` version: 2.13.0 - Platform: Linux-5.15.0-75-generic-x86_64-with-glibc2.29 - Python version: 3.8.10 - Huggingface_hub version: 0.15.1 - PyArrow version: 12.0.1 - Pandas version: 2.0.2
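A rough sketch of the separate-JSON-file workaround suggested in the comments above; the repo id, file name, and metadata values are placeholders, and token handling is omitted:

```python
# Sketch: store the Minari metadata next to the data instead of in DatasetInfo,
# since the description field is currently dropped after push_to_hub + reload.
import json
from huggingface_hub import HfApi

metadata = {"dataset_id": "dummy-combo-test-v0", "env_name": "DummyComboEnv-v0"}  # example values
with open("minari_metadata.json", "w") as f:
    json.dump(metadata, f)

HfApi().upload_file(
    path_or_fileobj="minari_metadata.json",
    path_in_repo="minari_metadata.json",
    repo_id="username/minari_dataset",  # placeholder
    repo_type="dataset",
)
```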
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5970/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5970/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5968
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5968/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5968/comments
https://api.github.com/repos/huggingface/datasets/issues/5968/events
https://github.com/huggingface/datasets/issues/5968
1,765,252,561
I_kwDODunzps5pN53R
5,968
Common Voice datasets still need `use_auth_token=True`
{ "login": "patrickvonplaten", "id": 23423619, "node_id": "MDQ6VXNlcjIzNDIzNjE5", "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "gravatar_id": "", "url": "https://api.github.com/users/patrickvonplaten", "html_url": "https://github.com/patrickvonplaten", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "cc @pcuenca as well. \r\n\r\nNot super urgent btw", "The issue commes from the dataset itself and is not related to the `datasets` lib\r\n\r\nsee https://huggingface.co/datasets/mozilla-foundation/common_voice_6_1/blob/2c475b3b88e0f2e5828f830a4b91618a25ff20b7/common_voice_6_1.py#L148-L152", "Let's remove these lines in the dataset no? cc @anton-l @Vaibhavs10 ", "Addressed in:\r\n\r\n* `mozilla-foundation/common_voice_1_0` [PR](https://huggingface.co/datasets/mozilla-foundation/common_voice_1_0/discussions/4)\r\n* `mozilla-foundation/common_voice_2_0` [PR](https://huggingface.co/datasets/mozilla-foundation/common_voice_2_0/discussions/3)\r\n* `mozilla-foundation/common_voice_3_0` [PR](https://huggingface.co/datasets/mozilla-foundation/common_voice_3_0/discussions/3)\r\n* `mozilla-foundation/common_voice_4_0` [PR](https://huggingface.co/datasets/mozilla-foundation/common_voice_4_0/discussions/3)\r\n* `mozilla-foundation/common_voice_5_0` [PR](https://huggingface.co/datasets/mozilla-foundation/common_voice_5_0/discussions/3)\r\n* `mozilla-foundation/common_voice_5_1` [PR](https://huggingface.co/datasets/mozilla-foundation/common_voice_5_1/discussions/3)\r\n* `mozilla-foundation/common_voice_6_0` [PR](https://huggingface.co/datasets/mozilla-foundation/common_voice_6_0/discussions/3)\r\n* `mozilla-foundation/common_voice_6_1` [PR](https://huggingface.co/datasets/mozilla-foundation/common_voice_6_1/discussions/3)\r\n* `mozilla-foundation/common_voice_7_0` [PR](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0/discussions/3)\r\n* `mozilla-foundation/common_voice_8_0` [PR](https://huggingface.co/datasets/mozilla-foundation/common_voice_8_0/discussions/7)\r\n* `mozilla-foundation/common_voice_9_0` [PR](https://huggingface.co/datasets/mozilla-foundation/common_voice_9_0/discussions/8)\r\n* `mozilla-foundation/common_voice_10_0` [PR](https://huggingface.co/datasets/mozilla-foundation/common_voice_10_0/discussions/7)" ]
2023-06-20T11:58:37
2023-07-29T16:08:59
2023-07-29T16:08:58
CONTRIBUTOR
null
null
null
### Describe the bug We don't need to pass `use_auth_token=True` anymore to download gated datasets or models, so the following should work if correctly logged in. ```py from datasets import load_dataset load_dataset("mozilla-foundation/common_voice_6_1", "tr", split="train+validation") ``` However it throws an error - probably because something weird is hardcoded into the dataset loading script. ### Steps to reproduce the bug 1.) ``` huggingface-cli login ``` 2.) Make sure that you have accepted the license here: https://huggingface.co/datasets/mozilla-foundation/common_voice_6_1 3.) Run: ```py from datasets import load_dataset load_dataset("mozilla-foundation/common_voice_6_1", "tr", split="train+validation") ``` 4.) You'll get: ``` File ~/hf/lib/python3.10/site-packages/datasets/builder.py:963, in DatasetBuilder._download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs) 961 split_dict = SplitDict(dataset_name=self.name) 962 split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs) --> 963 split_generators = self._split_generators(dl_manager, **split_generators_kwargs) 965 # Checksums verification 966 if verification_mode == VerificationMode.ALL_CHECKS and dl_manager.record_checksums: File ~/.cache/huggingface/modules/datasets_modules/datasets/mozilla-foundation--common_voice_6_1/f4d7854c466f5bd4908988dbd39044ec4fc634d89e0515ab0c51715c0127ffe3/common_voice_6_1.py:150, in CommonVoice._split_generators(self, dl_manager) 148 hf_auth_token = dl_manager.download_config.use_auth_token 149 if hf_auth_token is None: --> 150 raise ConnectionError( 151 "Please set use_auth_token=True or use_auth_token='<TOKEN>' to download this dataset" 152 ) 154 bundle_url_template = STATS["bundleURLTemplate"] 155 bundle_version = bundle_url_template.split("/")[0] ConnectionError: Please set use_auth_token=True or use_auth_token='<TOKEN>' to download this dataset ``` ### Expected behavior One should not have to pass `use_auth_token=True`. Also see discussion here: https://github.com/huggingface/blog/pull/1243#discussion_r1235131150 ### Environment info ``` - `datasets` version: 2.13.0 - Platform: Linux-6.2.0-76060200-generic-x86_64-with-glibc2.35 - Python version: 3.10.6 - Huggingface_hub version: 0.16.0.dev0 - PyArrow version: 11.0.0 - Pandas version: 1.5.3 ```
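Until the loading scripts are fixed, a minimal workaround consistent with the error message above is to pass the flag explicitly (it reads the token cached by `huggingface-cli login`):

```python
# Workaround sketch: satisfy the hard-coded check in the loading script by
# passing use_auth_token explicitly; not needed once the script is fixed.
from datasets import load_dataset

cv = load_dataset(
    "mozilla-foundation/common_voice_6_1",
    "tr",
    split="train+validation",
    use_auth_token=True,
)
```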
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5968/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5968/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5967
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5967/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5967/comments
https://api.github.com/repos/huggingface/datasets/issues/5967/events
https://github.com/huggingface/datasets/issues/5967
1,763,926,520
I_kwDODunzps5pI2H4
5,967
Config name / split name lost after map with multiproc
{ "login": "sanchit-gandhi", "id": 93869735, "node_id": "U_kgDOBZhWpw", "avatar_url": "https://avatars.githubusercontent.com/u/93869735?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sanchit-gandhi", "html_url": "https://github.com/sanchit-gandhi", "followers_url": "https://api.github.com/users/sanchit-gandhi/followers", "following_url": "https://api.github.com/users/sanchit-gandhi/following{/other_user}", "gists_url": "https://api.github.com/users/sanchit-gandhi/gists{/gist_id}", "starred_url": "https://api.github.com/users/sanchit-gandhi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sanchit-gandhi/subscriptions", "organizations_url": "https://api.github.com/users/sanchit-gandhi/orgs", "repos_url": "https://api.github.com/users/sanchit-gandhi/repos", "events_url": "https://api.github.com/users/sanchit-gandhi/events{/privacy}", "received_events_url": "https://api.github.com/users/sanchit-gandhi/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[ "This must be due to DatasetInfo.from_merge which drops them and is used in `concatenate_datasets`.\r\n\r\nAnd you're experiencing this issue because multiprocessing does concatenate the resulting datasets from each process.\r\n\r\nMaybe they should be kept if all the subdatasets share the same values for config_name and split", "That sounds like a clean workaround!" ]
2023-06-19T17:27:36
2023-06-28T08:55:25
null
CONTRIBUTOR
null
null
null
### Describe the bug Performing a `.map` method on a dataset loses it's config name / split name only if run with multiproc ### Steps to reproduce the bug ```python from datasets import Audio, load_dataset from transformers import AutoFeatureExtractor import numpy as np # load dummy dataset libri = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean") # make train / test splits libri = libri["validation"].train_test_split(seed=42, shuffle=True, test_size=0.1) # example feature extractor model_id = "ntu-spml/distilhubert" feature_extractor = AutoFeatureExtractor.from_pretrained(model_id, do_normalize=True, return_attention_mask=True) sampling_rate = feature_extractor.sampling_rate libri = libri.cast_column("audio", Audio(sampling_rate=sampling_rate)) max_duration = 30.0 def preprocess_function(examples): audio_arrays = [x["array"] for x in examples["audio"]] inputs = feature_extractor( audio_arrays, sampling_rate=feature_extractor.sampling_rate, max_length=int(feature_extractor.sampling_rate * max_duration), truncation=True, return_attention_mask=True, ) return inputs # single proc map libri_encoded = libri.map( preprocess_function, remove_columns=["audio", "file"], batched=True, num_proc=1 ) print(10 * "=" ,"Single processing", 10 * "=") print("Config name before: ", libri["train"].config_name, " Split name before: ", libri["train"].split) print("Config name after: ", libri_encoded["train"].config_name, " Split name after: ", libri_encoded["train"].split) # multi proc map libri_encoded = libri.map( preprocess_function, remove_columns=["audio", "file"], batched=True, num_proc=2 ) print(10 * "=" ,"Multi processing", 10 * "=") print("Config name before: ", libri["train"].config_name, " Split name before: ", libri["train"].split) print("Config name after: ", libri_encoded["train"].config_name, " Split name after: ", libri_encoded["train"].split) ``` **Print Output:** ``` ========== Single processing ========== Config name before: clean Split name before: validation Config name after: clean Split name after: validation ========== Multi processing ========== Config name before: clean Split name before: validation Config name after: None Split name after: None ``` => we can see that the config/split names are lost in the multiprocessing setting ### Expected behavior Should retain both config / split names in the multiproc setting ### Environment info - `datasets` version: 2.13.1.dev0 - Platform: Linux-5.15.0-67-generic-x86_64-with-glibc2.35 - Python version: 3.10.6 - Huggingface_hub version: 0.15.1 - PyArrow version: 12.0.0 - Pandas version: 2.0.2
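A small sketch of the cause suggested in the comments above: multiprocessing concatenates the per-process results, and the merged `DatasetInfo` drops `config_name`/`split` even when all parts agree. The dataset mirrors the repro; exact outputs are what the thread suggests, not verified here.

```python
# Demo sketch: concatenate_datasets merges DatasetInfo via DatasetInfo.from_merge,
# which does not keep config_name / split, matching the multiproc behaviour above.
from datasets import load_dataset, concatenate_datasets

ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
parts = [ds.shard(num_shards=2, index=i) for i in range(2)]

merged = concatenate_datasets(parts)
print(parts[0].config_name, parts[0].split)  # expected: clean validation
print(merged.config_name, merged.split)      # expected: None None
```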
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5967/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5967/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5965
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5965/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5965/comments
https://api.github.com/repos/huggingface/datasets/issues/5965/events
https://github.com/huggingface/datasets/issues/5965
1,763,648,540
I_kwDODunzps5pHyQc
5,965
"Couldn't cast array of type" in complex datasets
{ "login": "piercefreeman", "id": 1712066, "node_id": "MDQ6VXNlcjE3MTIwNjY=", "avatar_url": "https://avatars.githubusercontent.com/u/1712066?v=4", "gravatar_id": "", "url": "https://api.github.com/users/piercefreeman", "html_url": "https://github.com/piercefreeman", "followers_url": "https://api.github.com/users/piercefreeman/followers", "following_url": "https://api.github.com/users/piercefreeman/following{/other_user}", "gists_url": "https://api.github.com/users/piercefreeman/gists{/gist_id}", "starred_url": "https://api.github.com/users/piercefreeman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/piercefreeman/subscriptions", "organizations_url": "https://api.github.com/users/piercefreeman/orgs", "repos_url": "https://api.github.com/users/piercefreeman/repos", "events_url": "https://api.github.com/users/piercefreeman/events{/privacy}", "received_events_url": "https://api.github.com/users/piercefreeman/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[ { "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for reporting! \r\n\r\nSpecifying the target features explicitly should avoid this error:\r\n```python\r\ndataset = dataset.map(\r\n batch_process,\r\n batched=True,\r\n batch_size=1,\r\n num_proc=1,\r\n remove_columns=dataset.column_names,\r\n features=datasets.Features({\"texts\": datasets.Sequence(datasets.Value(\"string\"))})\r\n)\r\n```\r\n\r\nThis error stems from our type promotion not handling the nested case. But this promotion/casting allocates memory in most scenarios, which can be problematic for large datasets, so explicitly passing the features is the optimal solution.", "Hi @mariosasko thanks for the context, this is helpful to know. Would it be worth having some logic to generate this explicit feature specification automatically if a type annotation for a .map returns a dataclass that can be inferred?\r\n\r\nFeels like something that would be easy to implement and could save memory / deal with this case in a standardized way.", "> . Would it be worth having some logic to generate this explicit feature specification automatically if a type annotation for a .map returns a dataclass that can be inferred?\r\n\r\nInteresting proposal! Yes, we could consider doing this if the (return) type hint is `TypedDict`, and raise an error that type hints are incorrect if the cast using the inferred types fails.", "@mariosasko Put up an initial PR to implement this proposal. Let me know your thoughts on direction and what else should be in-scope here." ]
2023-06-19T14:16:14
2023-07-26T15:13:53
2023-07-26T15:13:53
NONE
null
null
null
### Describe the bug When doing a map of a dataset with complex types, sometimes `datasets` is unable to interpret the valid schema of a returned datasets.map() function. This often comes from conflicting types, like when both empty lists and filled lists are competing for the same field value. This is prone to happen in batch mapping, when the mapper returns a sequence of null/empty values and other batches are non-null. A workaround is to manually cast the new batch to a pyarrow table (like implemented in this [workaround](https://github.com/piercefreeman/lassen/pull/3)) but it feels like this ideally should be solved at the core library level. Note that the reproduction case only throws this error if the first datapoint has the empty list. If it is processed later, datasets already detects its representation as list-type and therefore allows the empty list to be provided. ### Steps to reproduce the bug A trivial reproduction case: ```python from typing import Iterator, Any import pandas as pd from datasets import Dataset def batch_to_examples(batch: dict[str, list[Any]]) -> Iterator[dict[str, Any]]: for i in range(next(iter(lengths))): yield {feature: values[i] for feature, values in batch.items()} def examples_to_batch(examples) -> dict[str, list[Any]]: batch = {} for example in examples: for feature, value in example.items(): if feature not in batch: batch[feature] = [] batch[feature].append(value) return batch def batch_process(examples, explicit_schema: bool): new_examples = [] for example in batch_to_examples(examples): new_examples.append(dict(texts=example["raw_text"].split())) return examples_to_batch(new_examples) df = pd.DataFrame( [ {"raw_text": ""}, {"raw_text": "This is a test"}, {"raw_text": "This is another test"}, ] ) dataset = Dataset.from_pandas(df) # datasets won't be able to typehint a dataset that starts with an empty example. with pytest.raises(TypeError, match="Couldn't cast array of type"): dataset = dataset.map( batch_process, batched=True, batch_size=1, num_proc=1, remove_columns=dataset.column_names, ) ``` This results in crashes like: ```bash File "/Users/piercefreeman/Library/Caches/pypoetry/virtualenvs/example-9kBqeSPy-py3.11/lib/python3.11/site-packages/datasets/table.py", line 1819, in wrapper return func(array, *args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/piercefreeman/Library/Caches/pypoetry/virtualenvs/example-9kBqeSPy-py3.11/lib/python3.11/site-packages/datasets/table.py", line 2109, in cast_array_to_feature return array_cast(array, feature(), allow_number_to_str=allow_number_to_str) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/piercefreeman/Library/Caches/pypoetry/virtualenvs/example-9kBqeSPy-py3.11/lib/python3.11/site-packages/datasets/table.py", line 1819, in wrapper return func(array, *args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/piercefreeman/Library/Caches/pypoetry/virtualenvs/example-9kBqeSPy-py3.11/lib/python3.11/site-packages/datasets/table.py", line 1998, in array_cast raise TypeError(f"Couldn't cast array of type {array.type} to {pa_type}") TypeError: Couldn't cast array of type string to null ``` ### Expected behavior The code should successfully map and create a new dataset without error. ### Environment info Mac OSX, Linux
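For reference, a self-contained variant of the repro above (the original references an undefined `lengths` and an unused `explicit_schema` argument); the behaviour should be the same: the empty first row makes the inferred type `null`, and the later cast fails.

```python
# Self-contained repro sketch: the first (empty) batch is typed as a list of nulls,
# so the following batch of strings cannot be cast and map() raises TypeError.
import pandas as pd
from datasets import Dataset

def batch_process(batch):
    return {"texts": [raw.split() for raw in batch["raw_text"]]}

df = pd.DataFrame(
    [{"raw_text": ""}, {"raw_text": "This is a test"}, {"raw_text": "This is another test"}]
)
dataset = Dataset.from_pandas(df)

dataset = dataset.map(  # raises TypeError: Couldn't cast array of type string to null
    batch_process,
    batched=True,
    batch_size=1,
    remove_columns=dataset.column_names,
)
```

As noted in the comments above, passing `features=datasets.Features({"texts": datasets.Sequence(datasets.Value("string"))})` to `map` avoids the error.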
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5965/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5965/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5963
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5963/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5963/comments
https://api.github.com/repos/huggingface/datasets/issues/5963/events
https://github.com/huggingface/datasets/issues/5963
1,762,774,457
I_kwDODunzps5pEc25
5,963
Got an error _pickle.PicklingError when using Dataset.from_spark
{ "login": "yanzia12138", "id": 112800614, "node_id": "U_kgDOBrkzZg", "avatar_url": "https://avatars.githubusercontent.com/u/112800614?v=4", "gravatar_id": "", "url": "https://api.github.com/users/yanzia12138", "html_url": "https://github.com/yanzia12138", "followers_url": "https://api.github.com/users/yanzia12138/followers", "following_url": "https://api.github.com/users/yanzia12138/following{/other_user}", "gists_url": "https://api.github.com/users/yanzia12138/gists{/gist_id}", "starred_url": "https://api.github.com/users/yanzia12138/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yanzia12138/subscriptions", "organizations_url": "https://api.github.com/users/yanzia12138/orgs", "repos_url": "https://api.github.com/users/yanzia12138/repos", "events_url": "https://api.github.com/users/yanzia12138/events{/privacy}", "received_events_url": "https://api.github.com/users/yanzia12138/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "i got error using method from_spark when using multi-node Spark cluster. seems could only use \"from_spark\" in local?", "@lhoestq ", "cc @maddiedawson it looks like there an issue with `_validate_cache_dir` ?\r\n\r\nIt looks like the function passed to mapPartitions has a reference to the Spark dataset builder, and therefore contains the SparkContext itself.\r\n\r\nI think it can be fixed by defining `create_cache_and_write_probe` outside the Spark dataset builder, and pass a `partial(create_cache_and_write_probe, cache_dir=self._cache_dir)` to `mapPartitions`", "Just saw this; thanks for flagging! Your proposed solution sounds good. I can prepare a PR", "@maddiedawson can you show me the demo ,so i can test in local .before your PR" ]
2023-06-19T05:30:35
2023-07-24T11:55:46
2023-07-24T11:55:46
NONE
null
null
null
python 3.9.2 Got an error _pickle.PicklingError use Dataset.from_spark. Did the dataset import load data from spark dataframe using multi-node Spark cluster df = spark.read.parquet(args.input_data).repartition(50) ds = Dataset.from_spark(df, keep_in_memory=True, cache_dir="/pnc-data/data/nuplan/t5_spark/cache_data") ds.save_to_disk(args.output_data) Error : _pickle.PicklingError: Could not serialize object: RuntimeError: It appears that you are attempting to reference SparkContext from a broadcast variable, action, or transforma tion. SparkContext can only be used on the driver, not in code that it run on workers. For more information, see SPARK-5063. 23/06/16 21:17:20 WARN ExecutorPodsWatchSnapshotSource: Kubernetes client has been closed (this is expected if the application is shutting down.) _Originally posted by @yanzia12138 in https://github.com/huggingface/datasets/issues/5701#issuecomment-1594674306_ W Traceback (most recent call last): File "/home/work/main.py", line 100, in <module> run(args) File "/home/work/main.py", line 80, in run ds = Dataset.from_spark(df1, keep_in_memory=True, File "/home/work/.local/lib/python3.9/site-packages/datasets/arrow_dataset.py", line 1281, in from_spark return SparkDatasetReader( File "/home/work/.local/lib/python3.9/site-packages/datasets/io/spark.py", line 53, in read self.builder.download_and_prepare( File "/home/work/.local/lib/python3.9/site-packages/datasets/builder.py", line 909, in download_and_prepare self._download_and_prepare( File "/home/work/.local/lib/python3.9/site-packages/datasets/builder.py", line 1004, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/home/work/.local/lib/python3.9/site-packages/datasets/packaged_modules/spark/spark.py", line 254, in _prepare_split self._validate_cache_dir() File "/home/work/.local/lib/python3.9/site-packages/datasets/packaged_modules/spark/spark.py", line 122, in _validate_cache_dir self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect() File "/home/work/.local/lib/python3.9/site-packages/pyspark/rdd.py", line 950, in collect sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd()) File "/home/work/.local/lib/python3.9/site-packages/pyspark/rdd.py", line 2951, in _jrdd wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer, File "/home/work/.local/lib/python3.9/site-packages/pyspark/rdd.py", line 2830, in _wrap_function pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command) File "/home/work/.local/lib/python3.9/site-packages/pyspark/rdd.py", line 2816, in _prepare_for_python_RDD pickled_command = ser.dumps(command) File "/home/work/.local/lib/python3.9/site-packages/pyspark/serializers.py", line 447, in dumps raise pickle.PicklingError(msg) _pickle.PicklingError: Could not serialize object: RuntimeError: It appears that you are attempting to reference SparkContext from a broadcast variable, action, or transformation. S parkContext can only be used on the driver, not in code that it run on workers. For more information, see SPARK-5063. 23/06/19 13:51:21 WARN ExecutorPodsWatchSnapshotSource: Kubernetes client has been closed (this is expected if the application is shutting down.)
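A generic PySpark sketch of the failure mode and of the shape of the fix proposed in the comments above (a closure holding the builder, and through it the SparkContext, cannot be pickled, while a module-level function wrapped in `functools.partial` can); names and paths are illustrative, not `datasets` code:

```python
# Illustrative sketch: ship a module-level function to the executors instead of a
# method that closes over an object holding the SparkContext.
from functools import partial
from pyspark.sql import SparkSession

def probe_partition(iterator, cache_dir):
    # stands in for "create the cache dir and write a probe file" on each executor
    yield cache_dir

spark = SparkSession.builder.master("local[1]").getOrCreate()
sc = spark.sparkContext

result = sc.parallelize(range(1), 1).mapPartitions(
    partial(probe_partition, cache_dir="/tmp/probe_cache")  # no SparkContext captured
).collect()
print(result)
```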
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5963/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5963/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5962
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5962/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5962/comments
https://api.github.com/repos/huggingface/datasets/issues/5962/events
https://github.com/huggingface/datasets/issues/5962
1,761,589,882
I_kwDODunzps5o_7p6
5,962
Issue with train_test_split maintaining the same underlying PyArrow Table
{ "login": "Oziel14", "id": 70730520, "node_id": "MDQ6VXNlcjcwNzMwNTIw", "avatar_url": "https://avatars.githubusercontent.com/u/70730520?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Oziel14", "html_url": "https://github.com/Oziel14", "followers_url": "https://api.github.com/users/Oziel14/followers", "following_url": "https://api.github.com/users/Oziel14/following{/other_user}", "gists_url": "https://api.github.com/users/Oziel14/gists{/gist_id}", "starred_url": "https://api.github.com/users/Oziel14/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Oziel14/subscriptions", "organizations_url": "https://api.github.com/users/Oziel14/orgs", "repos_url": "https://api.github.com/users/Oziel14/repos", "events_url": "https://api.github.com/users/Oziel14/events{/privacy}", "received_events_url": "https://api.github.com/users/Oziel14/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[]
2023-06-17T02:19:58
2023-06-17T02:19:58
null
NONE
null
null
null
### Describe the bug I've been using the train_test_split method in the datasets module to split my HuggingFace Dataset into separate training, validation, and testing subsets. However, I've noticed an issue where the split datasets appear to maintain the same underlying PyArrow Table. ### Steps to reproduce the bug 1. Load any dataset ```dataset = load_dataset("lhoestq/demo1")``` 2. Try the next code: ```python from datasets import Dataset, DatasetDict train_size = 0.6 split_train = dataset["train"].train_test_split( train_size=train_size, ) separate_dataset_dict = DatasetDict({ "train": split_train["train"], "test": split_train["test"], }) ``` 3. Printing the splits with ```print(separate_dataset_dict)``` indicates that they have 3 and 2 rows respectively. 4. But the next code: ```python print(len(separate_dataset_dict["train"].data['id'])) print(len(separate_dataset_dict["test"].data['id'])) ``` indicates that both underlying tables still have 5 rows. ### Expected behavior However, I've noticed that train_test_split["train"].data, test_val_split["train"].data, and test_val_split["test"].data are identical, suggesting that they all point to the same underlying PyArrow Table. This means that the split datasets are not independent, as I expected. I believe this is a bug in the train_test_split implementation, as I would expect this function to return datasets with separate underlying PyArrow Tables. Could you please help me understand if this is expected behavior, or if there's a workaround to create truly independent split datasets? I would appreciate any assistance with this issue. Thank you. ### Environment info I tried in Colab: - `datasets` version: 2.13.0 - Platform: Windows-10-10.0.22621-SP0 - Python version: 3.10.11 - Huggingface_hub version: 0.14.1 - PyArrow version: 12.0.0 - Pandas version: 2.0.1 and my PC: - `datasets` version: 2.13.0 - Platform: Linux-5.15.107+-x86_64-with-glibc2.31 - Python version: 3.10.12 - Huggingface_hub version: 0.15.1 - PyArrow version: 9.0.0 - Pandas version: 1.5.3
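A minimal workaround sketch (not from the issue thread; it assumes the public `datasets` API and the 5-row `lhoestq/demo1` dataset used above): `train_test_split` returns views backed by an indices mapping over the shared Arrow table, and `flatten_indices()` materializes each split into its own table.

```python
from datasets import load_dataset, DatasetDict

dataset = load_dataset("lhoestq/demo1")
split_train = dataset["train"].train_test_split(train_size=0.6)

# flatten_indices() rewrites each split into a fresh Arrow table instead of
# keeping an indices mapping over the shared parent table
separate_dataset_dict = DatasetDict({
    "train": split_train["train"].flatten_indices(),
    "test": split_train["test"].flatten_indices(),
})

print(len(separate_dataset_dict["train"].data["id"]))  # 3 rows, no longer 5
print(len(separate_dataset_dict["test"].data["id"]))   # 2 rows, no longer 5
```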
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5962/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5962/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5961
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5961/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5961/comments
https://api.github.com/repos/huggingface/datasets/issues/5961/events
https://github.com/huggingface/datasets/issues/5961
1,758,525,111
I_kwDODunzps5o0Pa3
5,961
IterableDataset: split by node and map may preprocess samples that will be skipped anyway
{ "login": "johnchienbronci", "id": 27708347, "node_id": "MDQ6VXNlcjI3NzA4MzQ3", "avatar_url": "https://avatars.githubusercontent.com/u/27708347?v=4", "gravatar_id": "", "url": "https://api.github.com/users/johnchienbronci", "html_url": "https://github.com/johnchienbronci", "followers_url": "https://api.github.com/users/johnchienbronci/followers", "following_url": "https://api.github.com/users/johnchienbronci/following{/other_user}", "gists_url": "https://api.github.com/users/johnchienbronci/gists{/gist_id}", "starred_url": "https://api.github.com/users/johnchienbronci/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/johnchienbronci/subscriptions", "organizations_url": "https://api.github.com/users/johnchienbronci/orgs", "repos_url": "https://api.github.com/users/johnchienbronci/repos", "events_url": "https://api.github.com/users/johnchienbronci/events{/privacy}", "received_events_url": "https://api.github.com/users/johnchienbronci/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[ "Does \"number of shards\" refer to the total number of data?\r\n\r\nmy config:\r\nnproc_per_node=2\r\nds=ds['train'] = load_dataset(streaming=True).take(50000)\r\n\r\nI'm test again: in prepare_data(), data have the same for each GPU\r\n", "The number of shards is `ds.n_shards`. It corresponds generally to the number of files the dataset is made of, to be able to distribute to several nodes.\r\n\r\n**You don't end up with the same data per GPU**. But all the samples are going through your preprocessing function you pass to map. They are just skipped afterwards to only keep 1 sample out of n(GPUs)", "For each GPU, although see the same data in prepare_data(), the actual training data will not be the same in the end. \r\nIs my understanding correct?\r\n\r\nWhere can I print the actual training data for each GPU?", "> For each GPU, although see the same data in prepare_data(), the actual training data will not be the same in the end.\r\nIs my understanding correct?\r\n\r\nYes exactly :)\r\n\r\n> Where can I print the actual training data for each GPU?\r\n\r\nYou should call print in the data_collator", "I print out n_shards, and under multiple GPUs, this value is always 1.\r\nIs this value correct?", "Yes it's correct, and it explains why you always have the same data passed to your map function (the data can't be split).\r\n\r\nBut after being passed to `map`, each GPU keeps one example out of n(GPUs) so that you don't end up with duplicate data across GPUs", "> > For each GPU, although see the same data in prepare_data(), the actual training data will not be the same in the end.\r\n> > Is my understanding correct?\r\n> \r\n> Yes exactly :)\r\n> \r\n> > Where can I print the actual training data for each GPU?\r\n> \r\n> You should call print in the data_collator\r\n\r\nOK, when printing the train data in the data collator, each GPU sees different data.\r\n\r\nThanks for your reply", "Do we have a solution for this one? Or it's required to get \"number of shards is a factor of number of GPUs: in that case the shards are evenly distributed per GPU\"", "For now it's required to have a number of shards that is a factor of the number of GPUs to not have all the workers process the same data (and then skip the right ones to not end up training on duplicate data).\r\n\r\nIt would be quite complex to implement a strategy that would utilize all the GPUs with an arbitrary number of shards even at the end of training" ]
2023-06-15T10:29:10
2023-09-01T10:35:11
null
NONE
null
null
null
There are two ways an iterable dataset can be split by node: 1. if the number of shards is a factor of number of GPUs: in that case the shards are evenly distributed per GPU 2. otherwise, each GPU iterate on the data and at the end keeps 1 sample out of n(GPUs) - skipping the others. In case 2. it's therefore possible to have the same examples passed to `prepare_dataset` for each GPU. This doesn't sound optimized though, because it runs the preprocessing on samples that won't be used in the end. Could you open a new issue so that we can discuss about this and find a solution ? _Originally posted by @lhoestq in https://github.com/huggingface/datasets/issues/5360#issuecomment-1592729051_
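A short sketch of case 1 under the stated constraint (shard count a multiple of the number of GPUs); the `audiofolder` path, the shard count, and the rank handling are assumptions, not part of the original report.

```python
from datasets import load_dataset
from datasets.distributed import split_dataset_by_node

# hypothetical local dataset; data_dir is a placeholder
ds = load_dataset("audiofolder", data_dir="path/to/data", split="train")

# choosing a shard count that is a multiple of the number of GPUs lets
# split_dataset_by_node hand out whole shards (case 1) instead of iterating
# everything and keeping 1 sample out of n(GPUs) (case 2)
world_size = 2
iterable_ds = ds.to_iterable_dataset(num_shards=2 * world_size)
print(iterable_ds.n_shards)  # 4

rank = 0  # normally read from the distributed environment (e.g. torch.distributed)
ds_for_this_rank = split_dataset_by_node(iterable_ds, rank=rank, world_size=world_size)
```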
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5961/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5961/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5959
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5959/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5959/comments
https://api.github.com/repos/huggingface/datasets/issues/5959/events
https://github.com/huggingface/datasets/issues/5959
1,757,397,507
I_kwDODunzps5ov8ID
5,959
read metric glue.py from local file
{ "login": "JiazhaoLi", "id": 31148397, "node_id": "MDQ6VXNlcjMxMTQ4Mzk3", "avatar_url": "https://avatars.githubusercontent.com/u/31148397?v=4", "gravatar_id": "", "url": "https://api.github.com/users/JiazhaoLi", "html_url": "https://github.com/JiazhaoLi", "followers_url": "https://api.github.com/users/JiazhaoLi/followers", "following_url": "https://api.github.com/users/JiazhaoLi/following{/other_user}", "gists_url": "https://api.github.com/users/JiazhaoLi/gists{/gist_id}", "starred_url": "https://api.github.com/users/JiazhaoLi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JiazhaoLi/subscriptions", "organizations_url": "https://api.github.com/users/JiazhaoLi/orgs", "repos_url": "https://api.github.com/users/JiazhaoLi/repos", "events_url": "https://api.github.com/users/JiazhaoLi/events{/privacy}", "received_events_url": "https://api.github.com/users/JiazhaoLi/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Sorry, I solve this by call `evaluate.load('glue_metric.py','sst-2')`\r\n" ]
2023-06-14T17:59:35
2023-06-14T18:04:16
2023-06-14T18:04:16
NONE
null
null
null
### Describe the bug Currently, the server is off-line. I am using the glue metric from the local file downloaded from the hub. I download / cache datasets using `load_dataset('glue','sst2', cache_dir='/xxx')` and then, in off-line mode, I use `load_dataset('xxx/glue.py','sst2', cache_dir='/xxx')`. I can successfully reuse the cached datasets. My problem is with load_metric. When I run `load_dataset('xxx/glue_metric.py','sst2',cache_dir='/xxx')` , it returns ` File "xx/lib64/python3.9/site-packages/datasets/utils/deprecation_utils.py", line 46, in wrapper return deprecated_function(*args, **kwargs) File "xx//lib64/python3.9/site-packages/datasets/load.py", line 1392, in load_metric metric = metric_cls( TypeError: 'NoneType' object is not callable` Thanks in advance for help! ### Steps to reproduce the bug N/A ### Expected behavior N/A ### Environment info `datasets == 2.12.0`
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5959/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5959/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5955
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5955/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5955/comments
https://api.github.com/repos/huggingface/datasets/issues/5955/events
https://github.com/huggingface/datasets/issues/5955
1,756,827,133
I_kwDODunzps5otw39
5,955
Strange bug in loading local JSON files, using load_dataset
{ "login": "Night-Quiet", "id": 73934131, "node_id": "MDQ6VXNlcjczOTM0MTMx", "avatar_url": "https://avatars.githubusercontent.com/u/73934131?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Night-Quiet", "html_url": "https://github.com/Night-Quiet", "followers_url": "https://api.github.com/users/Night-Quiet/followers", "following_url": "https://api.github.com/users/Night-Quiet/following{/other_user}", "gists_url": "https://api.github.com/users/Night-Quiet/gists{/gist_id}", "starred_url": "https://api.github.com/users/Night-Quiet/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Night-Quiet/subscriptions", "organizations_url": "https://api.github.com/users/Night-Quiet/orgs", "repos_url": "https://api.github.com/users/Night-Quiet/repos", "events_url": "https://api.github.com/users/Night-Quiet/events{/privacy}", "received_events_url": "https://api.github.com/users/Night-Quiet/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "This is the actual error:\r\n```\r\nFailed to read file '/home/lakala/hjc/code/pycode/glm/temp.json' with error <class 'pyarrow.lib.ArrowInvalid'>: cannot mix list and non-list, non-null values\r\n```\r\nWhich means some samples are incorrectly formatted.\r\n\r\nPyArrow, a storage backend that we use under the hood, requires that all the list elements have the same level of nesting (same number of dimensions) or are `None`.\r\n```python\r\nimport pyarrow as pa\r\npa.array([[1, 2, 3], 2]) # ArrowInvalid: cannot mix list and non-list, non-null values\r\npa.array([[1, 2, 3], [2]]) # works\r\n``` ", "@mariosasko \r\nI used the same operation to check the original data before and after slicing.\r\nThis is reflected in my code.\r\n160000 is not a specific number.\r\nI can also get output using 150000.\r\nThis doesn't seem to align very well with what you said.\r\nBecause if only some sample formats are incorrect.\r\nSo there should be an error in one of the front and back slices.\r\nthank you for your reply.", "Our JSON loader does the following in your case:\r\n\r\n```python\r\nimport json\r\nimport pyarrow as pa\r\n\r\nwith open(file, encoding=\"utf-8\") as f:\r\n dataset = json.load(f)\r\nkeys = set().union(*[row.keys() for row in dataset])\r\nmapping = {col: [row.get(col) for row in dataset] for col in keys}\r\npa_table = pa.Table.from_pydict(mapping) # the ArrowInvalid error comes from here\r\n```\r\n\r\nSo if this code throws an error with correctly-formatted JSON, then this is an Arrow bug and should be reported in their repo.\r\n\r\n> I used the same operation to check the original data before and after slicing.\r\nThis is reflected in my code.\r\n160000 is not a specific number.\r\nI can also get output using 150000.\r\nThis doesn't seem to align very well with what you said.\r\nBecause if only some sample formats are incorrect.\r\nSo there should be an error in one of the front and back slices.\r\n\r\nYou should shuffle the data to make sure that's not the case", "@mariosasko \r\nThank you.\r\nI will try again." ]
2023-06-14T12:46:00
2023-06-21T14:42:15
2023-06-21T14:42:15
NONE
null
null
null
### Describe the bug I am using 'load_dataset 'loads a JSON file, but I found a strange bug: an error will be reported when the length of the JSON file exceeds 160000 (uncertain exact number). I have checked the data through the following code and there are no issues. So I cannot determine the true reason for this error. The data is a list containing a dictionary. As follows: [ {'input': 'someting...', 'target': 'someting...', 'type': 'someting...', 'history': ['someting...', ...]}, ... ] ### Steps to reproduce the bug ``` import json from datasets import load_dataset path = "target.json" temp_path = "temp.json" with open(path, "r") as f: data = json.load(f) print(f"\n-------the JSON file length is: {len(data)}-------\n") with open(temp_path, "w") as f: json.dump(data[:160000], f) dataset = load_dataset("json", data_files=temp_path) print("\n-------This works when the JSON file length is 160000-------\n") with open(temp_path, "w") as f: json.dump(data[160000:], f) dataset = load_dataset("json", data_files=temp_path) print("\n-------This works and eliminates data issues-------\n") with open(temp_path, "w") as f: json.dump(data[:170000], f) dataset = load_dataset("json", data_files=temp_path) ``` ### Expected behavior ``` -------the JSON file length is: 173049------- Downloading and preparing dataset json/default to /root/.cache/huggingface/datasets/json/default-acf3c7f418c5f4b4/0.0.0/e347ab1c932092252e717ff3f949105a4dd28b27e842dd53157d2f72e276c2e4... Downloading data files: 100%|███████████████████| 1/1 [00:00<00:00, 3328.81it/s] Extracting data files: 100%|█████████████████████| 1/1 [00:00<00:00, 639.47it/s] Dataset json downloaded and prepared to /root/.cache/huggingface/datasets/json/default-acf3c7f418c5f4b4/0.0.0/e347ab1c932092252e717ff3f949105a4dd28b27e842dd53157d2f72e276c2e4. Subsequent calls will reuse this data. 100%|████████████████████████████████████████████| 1/1 [00:00<00:00, 265.85it/s] -------This works when the JSON file length is 160000------- Downloading and preparing dataset json/default to /root/.cache/huggingface/datasets/json/default-a42f04b263ceea6a/0.0.0/e347ab1c932092252e717ff3f949105a4dd28b27e842dd53157d2f72e276c2e4... Downloading data files: 100%|███████████████████| 1/1 [00:00<00:00, 2038.05it/s] Extracting data files: 100%|█████████████████████| 1/1 [00:00<00:00, 794.83it/s] Dataset json downloaded and prepared to /root/.cache/huggingface/datasets/json/default-a42f04b263ceea6a/0.0.0/e347ab1c932092252e717ff3f949105a4dd28b27e842dd53157d2f72e276c2e4. Subsequent calls will reuse this data. 100%|████████████████████████████████████████████| 1/1 [00:00<00:00, 681.00it/s] -------This works and eliminates data issues------- Downloading and preparing dataset json/default to /root/.cache/huggingface/datasets/json/default-63f391c89599c7b0/0.0.0/e347ab1c932092252e717ff3f949105a4dd28b27e842dd53157d2f72e276c2e4... Downloading data files: 100%|███████████████████| 1/1 [00:00<00:00, 3682.44it/s] Extracting data files: 100%|█████████████████████| 1/1 [00:00<00:00, 788.70it/s] Generating train split: 0 examples [00:00, ? 
examples/s]Failed to read file '/home/lakala/hjc/code/pycode/glm/temp.json' with error <class 'pyarrow.lib.ArrowInvalid'>: cannot mix list and non-list, non-null values Traceback (most recent call last): File "/home/lakala/conda/envs/glm/lib/python3.8/site-packages/datasets/builder.py", line 1858, in _prepare_split_single for _, table in generator: File "/home/lakala/conda/envs/glm/lib/python3.8/site-packages/datasets/packaged_modules/json/json.py", line 146, in _generate_tables raise ValueError(f"Not able to read records in the JSON file at {file}.") from None ValueError: Not able to read records in the JSON file at /home/lakala/hjc/code/pycode/glm/temp.json. The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/home/lakala/hjc/code/pycode/glm/test.py", line 22, in <module> dataset = load_dataset("json", data_files=temp_path) File "/home/lakala/conda/envs/glm/lib/python3.8/site-packages/datasets/load.py", line 1797, in load_dataset builder_instance.download_and_prepare( File "/home/lakala/conda/envs/glm/lib/python3.8/site-packages/datasets/builder.py", line 890, in download_and_prepare self._download_and_prepare( File "/home/lakala/conda/envs/glm/lib/python3.8/site-packages/datasets/builder.py", line 985, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/home/lakala/conda/envs/glm/lib/python3.8/site-packages/datasets/builder.py", line 1746, in _prepare_split for job_id, done, content in self._prepare_split_single( File "/home/lakala/conda/envs/glm/lib/python3.8/site-packages/datasets/builder.py", line 1891, in _prepare_split_single raise DatasetGenerationError("An error occurred while generating the dataset") from e datasets.builder.DatasetGenerationError: An error occurred while generating the dataset ``` ### Environment info ``` Ubuntu==22.04 python==3.8 pytorch-transformers==1.2.0 transformers== 4.27.1 datasets==2.12.0 numpy==1.24.3 pandas==1.5.3 ```
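A small diagnostic sketch (an assumption-based addition, not from the thread) to locate the fields that mix list and non-list values, which is what the `ArrowInvalid: cannot mix list and non-list, non-null values` error points at; `target.json` is the file from the reproduction steps above.

```python
import json

with open("target.json", encoding="utf-8") as f:
    data = json.load(f)

# record which value kinds (list vs scalar) each key takes and where first seen;
# pyarrow refuses to build a column that mixes list and non-list values
kinds_per_key = {}
for index, row in enumerate(data):
    for key, value in row.items():
        kind = "list" if isinstance(value, list) else "scalar"
        kinds_per_key.setdefault(key, {}).setdefault(kind, index)

for key, kinds in kinds_per_key.items():
    if len(kinds) > 1:
        print(f"column {key!r} mixes kinds, first occurrences at rows: {kinds}")
```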
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5955/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5955/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5953
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5953/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5953/comments
https://api.github.com/repos/huggingface/datasets/issues/5953/events
https://github.com/huggingface/datasets/issues/5953
1,756,520,523
I_kwDODunzps5osmBL
5,953
Bad error message when trying to download gated dataset
{ "login": "patrickvonplaten", "id": 23423619, "node_id": "MDQ6VXNlcjIzNDIzNjE5", "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "gravatar_id": "", "url": "https://api.github.com/users/patrickvonplaten", "html_url": "https://github.com/patrickvonplaten", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "cc @sanchit-gandhi @Vaibhavs10 @lhoestq - this is mainly for demos that use Common Voice datasets as done here: https://github.com/facebookresearch/fairseq/tree/main/examples/mms#-transformers\r\n", "Hi ! the error for me is\r\n\r\n```\r\nFileNotFoundError: Couldn't find a dataset script at /content/mozilla-foundation/common_voice_13_0/common_voice_13_0.py or any data file in the same directory. Couldn't find 'mozilla-foundation/common_voice_13_0' on the Hugging Face Hub either: FileNotFoundError: Dataset 'mozilla-foundation/common_voice_13_0' doesn't exist on the Hub. If the repo is private or gated, make sure to log in with `huggingface-cli login`.\r\n```\r\n\r\nAnd tbh idk how you managed to get your error. \"n_shards.json\" is not even a thing in `datasets`", "Okay, I am able to reproduce @patrickvonplaten's original error: https://github.com/Vaibhavs10/scratchpad/blob/main/cv13_datasets_test.ipynb\r\n\r\nAlso not sure why it looks for `n_shards.json`", "Ok I see, this file is downloaded from the CV dataset script - let me investigate", "Ok I see: when you log out you no longer have access to the repository.\r\n\r\nTherefore the dataset script is loaded from cache:\r\n```\r\nWARNING:datasets.load:Using the latest cached version of the module from /root/.cache/huggingface/modules/datasets_modules/datasets/mozilla-foundation--common_voice_13_0/22809012aac1fc9803eaffc44122e4149043748e93933935d5ea19898587e4d7 (last modified on Wed Jun 14 10:13:17 2023) since it couldn't be found locally at mozilla-foundation/common_voice_13_0., or remotely on the Hugging Face Hub.\r\n```\r\n\r\nand the script tries to download the n_shards.json but fails", "Is this ok for you https://github.com/huggingface/datasets/pull/5954 ?\r\n\r\nI'll do a release this afternoon", "Cool! ", "this is included in the new release 2.13.0" ]
2023-06-14T10:03:39
2023-06-14T16:36:51
2023-06-14T12:26:32
CONTRIBUTOR
null
null
null
### Describe the bug When I attempt to download a model from the Hub that is gated without being logged in, I get a nice error message. E.g.: ```sh Repository Not Found for url: https://huggingface.co/api/models/DeepFloyd/IF-I-XL-v1.0. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. Invalid username or password.. Will try to load from local cache. ``` If I do the same for a gated dataset on the Hub, I'm not given a nice error message IMO: ```sh File ~/hf/lib/python3.10/site-packages/fsspec/implementations/http.py:430, in HTTPFileSystem._info(self, url, **kwargs) 427 except Exception as exc: 428 if policy == "get": 429 # If get failed, then raise a FileNotFoundError --> 430 raise FileNotFoundError(url) from exc 431 logger.debug(str(exc)) 433 return {"name": url, "size": None, **info, "type": "file"} FileNotFoundError: https://huggingface.co/datasets/mozilla-foundation/common_voice_13_0/resolve/main/n_shards.json ``` ### Steps to reproduce the bug ``` huggingface-cli logout ``` and then: ```py from datasets import load_dataset, Audio # English stream_data = load_dataset("mozilla-foundation/common_voice_13_0", "en", split="test", streaming=True) stream_data = stream_data.cast_column("audio", Audio(sampling_rate=16000)) en_sample = next(iter(stream_data))["audio"]["array"] # Swahili stream_data = load_dataset("mozilla-foundation/common_voice_13_0", "sw", split="test", streaming=True) stream_data = stream_data.cast_column("audio", Audio(sampling_rate=16000)) sw_sample = next(iter(stream_data))["audio"]["array"] ``` ### Expected behavior Better error message ### Environment info - `datasets` version: 2.12.0 - Platform: Linux-6.2.0-76060200-generic-x86_64-with-glibc2.35 - Python version: 3.10.6 - Huggingface_hub version: 0.16.0.dev0 - PyArrow version: 11.0.0 - Pandas version: 1.5.3
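Separately from the error-message improvement requested here, a hedged sketch of the underlying fix (authenticate before streaming a gated dataset); the token string is a placeholder.

```python
from huggingface_hub import login
from datasets import load_dataset, Audio

login(token="hf_xxx")  # placeholder token; running `huggingface-cli login` works as well

stream_data = load_dataset(
    "mozilla-foundation/common_voice_13_0", "en", split="test", streaming=True
)
stream_data = stream_data.cast_column("audio", Audio(sampling_rate=16000))
en_sample = next(iter(stream_data))["audio"]["array"]
```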
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5953/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5953/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5951
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5951/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5951/comments
https://api.github.com/repos/huggingface/datasets/issues/5951/events
https://github.com/huggingface/datasets/issues/5951
1,756,363,546
I_kwDODunzps5or_sa
5,951
What is the Right way to use discofuse dataset??
{ "login": "akesh1235", "id": 125154243, "node_id": "U_kgDOB3Wzww", "avatar_url": "https://avatars.githubusercontent.com/u/125154243?v=4", "gravatar_id": "", "url": "https://api.github.com/users/akesh1235", "html_url": "https://github.com/akesh1235", "followers_url": "https://api.github.com/users/akesh1235/followers", "following_url": "https://api.github.com/users/akesh1235/following{/other_user}", "gists_url": "https://api.github.com/users/akesh1235/gists{/gist_id}", "starred_url": "https://api.github.com/users/akesh1235/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/akesh1235/subscriptions", "organizations_url": "https://api.github.com/users/akesh1235/orgs", "repos_url": "https://api.github.com/users/akesh1235/repos", "events_url": "https://api.github.com/users/akesh1235/events{/privacy}", "received_events_url": "https://api.github.com/users/akesh1235/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Thanks for opening https://huggingface.co/datasets/discofuse/discussions/3, let's continue the discussion over there if you don't mind", "I have posted there also sir, please check\r\n@lhoestq" ]
2023-06-14T08:38:39
2023-06-14T13:25:06
2023-06-14T12:10:16
NONE
null
null
null
[Click here for Dataset link](https://huggingface.co/datasets/discofuse/viewer/discofuse-wikipedia/train?row=6) **Below is the following way, as per my understanding , Is it correct :question: :question:** The **columns/features from `DiscoFuse dataset`** that will be the **input to the `encoder` and `decoder`** are: [Click here for Dataset link](https://huggingface.co/datasets/discofuse/viewer/discofuse-wikipedia/train?row=6) 1. **coherent_first_sentence** 2. **coherent_second_sentence** 3. **incoherent_first_sentence** 4. **incoherent_second_sentence** [Click here for Dataset link](https://huggingface.co/datasets/discofuse/viewer/discofuse-wikipedia/train?row=6) The **`encoder` will take these four columns as input and encode them into a sequence of hidden states. The `decoder` will then take these hidden states as input and decode them into a new sentence that fuses the two original sentences together.** The **discourse type, connective_string, has_coref_type_pronoun, and has_coref_type_nominal columns will not be used as input to the encoder or decoder.** These columns are used to provide additional information about the dataset, but they are not necessary for the task of sentence fusion. Please correct me if I am wrong; otherwise, if this understanding is right, how shall I implement this task practically?
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5951/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5951/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5950
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5950/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5950/comments
https://api.github.com/repos/huggingface/datasets/issues/5950/events
https://github.com/huggingface/datasets/issues/5950
1,755,197,946
I_kwDODunzps5onjH6
5,950
Support for data with instance-wise dictionary as features
{ "login": "richardwth", "id": 33274336, "node_id": "MDQ6VXNlcjMzMjc0MzM2", "avatar_url": "https://avatars.githubusercontent.com/u/33274336?v=4", "gravatar_id": "", "url": "https://api.github.com/users/richardwth", "html_url": "https://github.com/richardwth", "followers_url": "https://api.github.com/users/richardwth/followers", "following_url": "https://api.github.com/users/richardwth/following{/other_user}", "gists_url": "https://api.github.com/users/richardwth/gists{/gist_id}", "starred_url": "https://api.github.com/users/richardwth/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/richardwth/subscriptions", "organizations_url": "https://api.github.com/users/richardwth/orgs", "repos_url": "https://api.github.com/users/richardwth/repos", "events_url": "https://api.github.com/users/richardwth/events{/privacy}", "received_events_url": "https://api.github.com/users/richardwth/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[ "Hi ! We use the Arrow columnar format under the hood, which doesn't support such dictionaries: each field must have a fixed type and exist in each sample.\r\n\r\nInstead you can restructure your data like\r\n```\r\n{\r\n \"index\": 0,\r\n \"keys\": [\"2 * x + y >= 3\"],\r\n \"values\": [[\"2 * x + y >= 3\", \"4 * x + 2 * y >= 6\"]],\r\n }\r\n},\r\n...\r\n{\r\n \"index\": 9999,\r\n \"keys\": [\"x >= 6\"],\r\n \"values\": [[\"x >= 6\", \"x >= 0\", \"x >= -1\"]],\r\n},\r\n...\r\n```" ]
2023-06-13T15:49:00
2023-06-14T12:13:38
null
NONE
null
null
null
### Feature request I notice that when loading data instances whose feature type is a Python dictionary, the dictionary keys are broadcast so that every instance has the same set of keys. Please see an example in the Motivation section. Is it possible to avoid this behavior, i.e., load dictionary features as they are and not broadcast the keys among instances? Please note that these dictionaries would have to be processed dynamically at each training iteration into strings (and tokenized). ### Motivation I am trying to load a dataset from a json file. Each instance of the dataset has a feature that is a dictionary, but its keys depend on the instance; every two instances may have different keys. For example, imagine a dataset that contains a set of math expressions from a bunch of mutually redundant expressions: ``` { "index": 0, "feature": { "2 * x + y >= 3": ["2 * x + y >= 3", "4 * x + 2 * y >= 6"], ... } }, ... { "index": 9999, "feature": { "x >= 6": ["x >= 6", "x >= 0", "x >= -1"], ... } }, ... ``` When directly loading the dataset using `data = load_dataset("json", data_files=file_paths, split='train')`, each instance would have all the keys from other instances, with None as values. That is, the instance at index 0 becomes: ``` { "index": 0, "feature": { "2 * x + y >= 3": ["2 * x + y >= 3", "4 * x + 2 * y >= 6"], ... "x >= 6": None, # keys from other instances ... } }, ``` This is not desirable. Moreover, an error is raised if I attempt to combine two such datasets using `data = concatenate_datasets(multi_datasets)`, perhaps because their dictionary features contain different keys. A solution I can think of is to store the dictionary features as a long string and evaluate it later. Please kindly suggest any other solution using existing methods of datasets. ### Your contribution N/A
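A sketch of the restructuring suggested in the comments above, under the assumption that the raw file matches the example layout (`index` plus an instance-dependent `feature` dict); the file names are placeholders.

```python
import json

# convert {"index": ..., "feature": {k: v, ...}} into fixed "keys"/"values" columns
# so every instance has the same schema and Arrow can type the columns
with open("raw.json", encoding="utf-8") as f:
    rows = json.load(f)

converted = [
    {
        "index": row["index"],
        "keys": list(row["feature"].keys()),
        "values": list(row["feature"].values()),
    }
    for row in rows
]

with open("converted.json", "w", encoding="utf-8") as f:
    json.dump(converted, f)

# load_dataset("json", data_files="converted.json") can then load it, and the
# original dict is recoverable per instance with dict(zip(keys, values))
```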
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5950/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5950/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5947
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5947/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5947/comments
https://api.github.com/repos/huggingface/datasets/issues/5947/events
https://github.com/huggingface/datasets/issues/5947
1,754,359,316
I_kwDODunzps5okWYU
5,947
Return the audio filename when decoding fails due to corrupt files
{ "login": "wetdog", "id": 8949105, "node_id": "MDQ6VXNlcjg5NDkxMDU=", "avatar_url": "https://avatars.githubusercontent.com/u/8949105?v=4", "gravatar_id": "", "url": "https://api.github.com/users/wetdog", "html_url": "https://github.com/wetdog", "followers_url": "https://api.github.com/users/wetdog/followers", "following_url": "https://api.github.com/users/wetdog/following{/other_user}", "gists_url": "https://api.github.com/users/wetdog/gists{/gist_id}", "starred_url": "https://api.github.com/users/wetdog/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/wetdog/subscriptions", "organizations_url": "https://api.github.com/users/wetdog/orgs", "repos_url": "https://api.github.com/users/wetdog/repos", "events_url": "https://api.github.com/users/wetdog/events{/privacy}", "received_events_url": "https://api.github.com/users/wetdog/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[ "Hi ! The audio data don't always exist as files on disk - the blobs are often stored in the Arrow files. For now I'd suggest disabling decoding with `.cast_column(\"audio\", Audio(decode=False))` and apply your own decoding that handles corrupted files (maybe to filter them out ?)\r\n\r\ncc @sanchit-gandhi since it's related to our discussion about allowing users to make decoding return `None` and show a warning when there are corrupted files", "Thanks @lhoestq, I wasn't aware of the decode flag. It makes more sense as you say to show a warning when there are corrupted files together with some metadata of the file that allows to filter them from the dataset.\r\n\r\nMy workaround was to catch the LibsndfileError and generate a dummy audio with an unsual sample rate to filter it later. However returning `None` seems better. \r\n\r\n`try:\r\n array, sampling_rate = sf.read(file)\r\nexcept sf.LibsndfileError:\r\n print(\"bad file\")\r\n array = np.array([0.0])\r\n sampling_rate = 99.000` \r\n\r\n" ]
2023-06-13T08:44:09
2023-06-14T12:45:01
null
NONE
null
null
null
### Feature request Return the audio filename when audio decoding fails. Although there are currently some checks for the mp3 and opus formats against the library version, there are still cases where audio decoding can fail, e.g. a corrupt file. ### Motivation When you try to load an audio file dataset and decoding fails, you can't know which file is corrupt ``` raise LibsndfileError(err, prefix="Error opening {0!r}: ".format(self.name)) soundfile.LibsndfileError: Error opening <_io.BytesIO object at 0x7f5ab7e38290>: Format not recognised. ``` ### Your contribution Make a PR to add handling for LibsndfileError so it returns the audio filename or path when soundfile decoding fails.
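A hedged sketch of the workaround discussed in the comments above (disable decoding, then filter unreadable files); the `audiofolder` path is a placeholder and the example assumes locally stored files so that `path` is populated.

```python
import soundfile as sf
from datasets import load_dataset, Audio

ds = load_dataset("audiofolder", data_dir="path/to/audio", split="train")
ds = ds.cast_column("audio", Audio(decode=False))  # keep {"path", "bytes"}, skip eager decoding

def is_decodable(example):
    try:
        sf.read(example["audio"]["path"])
        return True
    except sf.LibsndfileError:
        print("corrupt file:", example["audio"]["path"])  # the filename is now known
        return False

clean_ds = ds.filter(is_decodable)
```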
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5947/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5947/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5946
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5946/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5946/comments
https://api.github.com/repos/huggingface/datasets/issues/5946/events
https://github.com/huggingface/datasets/issues/5946
1,754,234,469
I_kwDODunzps5oj35l
5,946
IndexError Not Solving -> IndexError: Invalid key: ?? is out of bounds for size 0 or ??
{ "login": "syngokhan", "id": 70565543, "node_id": "MDQ6VXNlcjcwNTY1NTQz", "avatar_url": "https://avatars.githubusercontent.com/u/70565543?v=4", "gravatar_id": "", "url": "https://api.github.com/users/syngokhan", "html_url": "https://github.com/syngokhan", "followers_url": "https://api.github.com/users/syngokhan/followers", "following_url": "https://api.github.com/users/syngokhan/following{/other_user}", "gists_url": "https://api.github.com/users/syngokhan/gists{/gist_id}", "starred_url": "https://api.github.com/users/syngokhan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/syngokhan/subscriptions", "organizations_url": "https://api.github.com/users/syngokhan/orgs", "repos_url": "https://api.github.com/users/syngokhan/repos", "events_url": "https://api.github.com/users/syngokhan/events{/privacy}", "received_events_url": "https://api.github.com/users/syngokhan/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[ "https://colab.research.google.com/#scrollTo=AQ_HCYruWIHU&fileId=https%3A//huggingface.co/dfurman/falcon-40b-chat-oasst1/blob/main/finetune_falcon40b_oasst1_with_bnb_peft.ipynb\r\n\r\nI ran the same administration exactly the same but got the same error", "Looks related to https://discuss.huggingface.co/t/indexerror-invalid-key-16-is-out-of-bounds-for-size-0/14298/4?u=lhoestq", "> Looks related to https://discuss.huggingface.co/t/indexerror-invalid-key-16-is-out-of-bounds-for-size-0/14298/4?u=lhoestq\n\nThe problem has not been solved, I have tried this before, but the problem is the same", "> \r\n\r\n@syngokhan did u solve it? \r\nI am desperate ", "data = data[\"train\"].shuffle().map(generate_and_tokenize_prompt, batched = False) # change this line to -\r\n\r\ndata[\"train\"] = data[\"train\"].shuffle().map(generate_and_tokenize_prompt, batched = False)\r\nAfter doing this change you code should run fine.", "> > \r\n> \r\n> @syngokhan did u solve it? I am desperate\r\n\r\nrefer to my earlier comment. you will find the solution." ]
2023-06-13T07:34:15
2023-07-14T12:04:48
null
NONE
null
null
null
### Describe the bug in <cell line: 1>:1 │ │ │ │ /usr/local/lib/python3.10/dist-packages/transformers/trainer.py:1537 in train │ │ │ │ 1534 │ │ inner_training_loop = find_executable_batch_size( │ │ 1535 │ │ │ self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size │ │ 1536 │ │ ) │ │ ❱ 1537 │ │ return inner_training_loop( │ │ 1538 │ │ │ args=args, │ │ 1539 │ │ │ resume_from_checkpoint=resume_from_checkpoint, │ │ 1540 │ │ │ trial=trial, │ │ │ │ /usr/local/lib/python3.10/dist-packages/transformers/trainer.py:1789 in _inner_training_loop │ │ │ │ 1786 │ │ │ │ rng_to_sync = True │ │ 1787 │ │ │ │ │ 1788 │ │ │ step = -1 │ │ ❱ 1789 │ │ │ for step, inputs in enumerate(epoch_iterator): │ │ 1790 │ │ │ │ total_batched_samples += 1 │ │ 1791 │ │ │ │ if rng_to_sync: │ │ 1792 │ │ │ │ │ self._load_rng_state(resume_from_checkpoint) │ │ │ │ /usr/local/lib/python3.10/dist-packages/accelerate/data_loader.py:377 in __iter__ │ │ │ │ 374 │ │ dataloader_iter = super().__iter__() │ │ 375 │ │ # We iterate one batch ahead to check when we are at the end │ │ 376 │ │ try: │ │ ❱ 377 │ │ │ current_batch = next(dataloader_iter) │ │ 378 │ │ except StopIteration: │ │ 379 │ │ │ yield │ │ 380 │ │ │ │ /usr/local/lib/python3.10/dist-packages/torch/utils/data/dataloader.py:633 in __next__ │ │ │ │ 630 │ │ │ if self._sampler_iter is None: │ │ 631 │ │ │ │ # TODO(https://github.com/pytorch/pytorch/issues/76750) │ │ 632 │ │ │ │ self._reset() # type: ignore[call-arg] │ │ ❱ 633 │ │ │ data = self._next_data() │ │ 634 │ │ │ self._num_yielded += 1 │ │ 635 │ │ │ if self._dataset_kind == _DatasetKind.Iterable and \ │ │ 636 │ │ │ │ │ self._IterableDataset_len_called is not None and \ │ │ │ │ /usr/local/lib/python3.10/dist-packages/torch/utils/data/dataloader.py:677 in _next_data │ │ │ │ 674 │ │ │ 675 │ def _next_data(self): │ │ 676 │ │ index = self._next_index() # may raise StopIteration │ │ ❱ 677 │ │ data = self._dataset_fetcher.fetch(index) # may raise StopIteration │ │ 678 │ │ if self._pin_memory: │ │ 679 │ │ │ data = _utils.pin_memory.pin_memory(data, self._pin_memory_device) │ │ 680 │ │ return data │ │ │ │ /usr/local/lib/python3.10/dist-packages/torch/utils/data/_utils/fetch.py:49 in fetch │ │ │ │ 46 │ def fetch(self, possibly_batched_index): │ │ 47 │ │ if self.auto_collation: │ │ 48 │ │ │ if hasattr(self.dataset, "__getitems__") and self.dataset.__getitems__: │ │ ❱ 49 │ │ │ │ data = self.dataset.__getitems__(possibly_batched_index) │ │ 50 │ │ │ else: │ │ 51 │ │ │ │ data = [self.dataset[idx] for idx in possibly_batched_index] │ │ 52 │ │ else: │ │ │ │ /usr/local/lib/python3.10/dist-packages/datasets/arrow_dataset.py:2782 in __getitems__ │ │ │ │ 2779 │ │ │ 2780 │ def __getitems__(self, keys: List) -> List: │ │ 2781 │ │ """Can be used to get a batch using a list of integers indices.""" │ │ ❱ 2782 │ │ batch = self.__getitem__(keys) │ │ 2783 │ │ n_examples = len(batch[next(iter(batch))]) │ │ 2784 │ │ return [{col: array[i] for col, array in batch.items()} for i in range(n_example │ │ 2785 │ │ │ │ /usr/local/lib/python3.10/dist-packages/datasets/arrow_dataset.py:2778 in __getitem__ │ │ │ │ 2775 │ │ │ 2776 │ def __getitem__(self, key): # noqa: F811 │ │ 2777 │ │ """Can be used to index columns (by string names) or rows (by integer index or i │ │ ❱ 2778 │ │ return self._getitem(key) │ │ 2779 │ │ │ 2780 │ def __getitems__(self, keys: List) -> List: │ │ 2781 │ │ """Can be used to get a batch using a list of integers indices.""" │ │ │ │ /usr/local/lib/python3.10/dist-packages/datasets/arrow_dataset.py:2762 in _getitem │ │ │ │ 
2759 │ │ format_kwargs = kwargs["format_kwargs"] if "format_kwargs" in kwargs else self._ │ │ 2760 │ │ format_kwargs = format_kwargs if format_kwargs is not None else {} │ │ 2761 │ │ formatter = get_formatter(format_type, features=self._info.features, **format_kw │ │ ❱ 2762 │ │ pa_subtable = query_table(self._data, key, indices=self._indices if self._indice │ │ 2763 │ │ formatted_output = format_table( │ │ 2764 │ │ │ pa_subtable, key, formatter=formatter, format_columns=format_columns, output │ │ 2765 │ │ ) │ │ │ │ /usr/local/lib/python3.10/dist-packages/datasets/formatting/formatting.py:578 in query_table │ │ │ │ 575 │ │ _check_valid_column_key(key, table.column_names) │ │ 576 │ else: │ │ 577 │ │ size = indices.num_rows if indices is not None else table.num_rows │ │ ❱ 578 │ │ _check_valid_index_key(key, size) │ │ 579 │ # Query the main table │ │ 580 │ if indices is None: │ │ 581 │ │ pa_subtable = _query_table(table, key) │ │ │ │ /usr/local/lib/python3.10/dist-packages/datasets/formatting/formatting.py:531 in │ │ _check_valid_index_key │ │ │ │ 528 │ │ │ _check_valid_index_key(min(key), size=size) │ │ 529 │ elif isinstance(key, Iterable): │ │ 530 │ │ if len(key) > 0: │ │ ❱ 531 │ │ │ _check_valid_index_key(int(max(key)), size=size) │ │ 532 │ │ │ _check_valid_index_key(int(min(key)), size=size) │ │ 533 │ else: │ │ 534 │ │ _raise_bad_key_type(key) │ │ │ │ /usr/local/lib/python3.10/dist-packages/datasets/formatting/formatting.py:521 in │ │ _check_valid_index_key │ │ │ │ 518 def _check_valid_index_key(key: Union[int, slice, range, Iterable], size: int) -> None: │ │ 519 │ if isinstance(key, int): │ │ 520 │ │ if (key < 0 and key + size < 0) or (key >= size): │ │ ❱ 521 │ │ │ raise IndexError(f"Invalid key: {key} is out of bounds for size {size}") │ │ 522 │ │ return │ │ 523 │ elif isinstance(key, slice): │ │ 524 │ │ pass ### Steps to reproduce the bug `` import json import os from pprint import pprint import bitsandbytes as bnb import pandas as pd import torch import torch.nn as nn import transformers from datasets import Dataset,load_dataset from peft import ( LoraConfig, PeftConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training ) from transformers import ( AutoConfig, AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, ) os.environ["CUDA_VISIBLE_DEVICES"] = "0" def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. 
""" trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" ) MODEL_NAME = "tiiuae/falcon-7b" bnb_config = BitsAndBytesConfig( load_in_4bit = True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16, ) model = AutoModelForCausalLM.from_pretrained( MODEL_NAME, device_map = "auto", trust_remote_code = True, quantization_config = bnb_config ) tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) tokenizer.pad_token = tokenizer.eos_token model.gradient_checkpointing_enable() model = prepare_model_for_kbit_training(model) config = LoraConfig( r = 16, lora_alpha = 32, target_modules = ["query_key_value"], lora_dropout = 0.05, bias = "none", task_type = "CASUAL_LM" ) model = get_peft_model(model,config) print_trainable_parameters(model) def generate_prompt(data_point): return f""" <human>: {data_point["question"]} <assistant>: {data_point["answer"]} """.strip() def generate_and_tokenize_prompt(data_point): full_prompt = generate_prompt(data_point) tokenized_full_prompt = tokenizer(full_prompt, padding = True, truncation = True,return_tensors = None) return dict({ "input_ids" : tokenized_full_prompt["input_ids"], "attention_mask" : tokenized_full_prompt["attention_mask"] }) data = data["train"].shuffle().map(generate_and_tokenize_prompt, batched = False) OUTPUT_DIR = "experiments" trainings_args = transformers.TrainingArguments( per_device_train_batch_size = 1, gradient_accumulation_steps = 4, num_train_epochs = 1, learning_rate = 2e-4, fp16 = True, save_total_limit = 3, logging_steps = 1, output_dir = OUTPUT_DIR, max_steps = 80, optim = "paged_adamw_8bit", lr_scheduler_type = "cosine", warmup_ratio = 0.05, #remove_unused_columns=True ) trainer = transformers.Trainer( model = model, train_dataset = data, args = trainings_args, data_collator = transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() IndexError: Invalid key: 32 is out of bounds for size 0 DataSet Format is like : [{"question": "How can I create an account?", "answer": "To create an account, click on the 'Sign Up' button on the top right corner of our website and follow the instructions to complete the registration process."}, .... ] ### Expected behavior - ### Environment info !pip install -q pip !pip install -q bitsandbytes==0.39.0 !pip install -q torch==2.0.1 !pip install -q git+https://github.com/huggingface/transformers.git !pip install -q git+https://github.com/huggingface/peft.git !pip install -q git+https://github.com/huggingface/accelerate.git !pip install -q datasets !pip install -q loralib==0.1.1 !pip install -q einops==0.6.1 import json import os from pprint import pprint import bitsandbytes as bnb import pandas as pd import torch import torch.nn as nn import transformers from datasets import Dataset,load_dataset from peft import ( LoraConfig, PeftConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training ) from transformers import ( AutoConfig, AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, ) os.environ["CUDA_VISIBLE_DEVICES"] = "0"
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5946/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5946/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5945
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5945/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5945/comments
https://api.github.com/repos/huggingface/datasets/issues/5945/events
https://github.com/huggingface/datasets/issues/5945
1,754,084,577
I_kwDODunzps5ojTTh
5,945
Failing to upload dataset to the hub
{ "login": "Ar770", "id": 77382661, "node_id": "MDQ6VXNlcjc3MzgyNjYx", "avatar_url": "https://avatars.githubusercontent.com/u/77382661?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Ar770", "html_url": "https://github.com/Ar770", "followers_url": "https://api.github.com/users/Ar770/followers", "following_url": "https://api.github.com/users/Ar770/following{/other_user}", "gists_url": "https://api.github.com/users/Ar770/gists{/gist_id}", "starred_url": "https://api.github.com/users/Ar770/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Ar770/subscriptions", "organizations_url": "https://api.github.com/users/Ar770/orgs", "repos_url": "https://api.github.com/users/Ar770/repos", "events_url": "https://api.github.com/users/Ar770/events{/privacy}", "received_events_url": "https://api.github.com/users/Ar770/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi ! Feel free to re-run your code later, it will resume automatically where you left", "Tried many times in the last 2 weeks, problem remains.", "Alternatively you can save your dataset in parquet files locally and upload them to the hub manually\r\n\r\n```python\r\nfrom tqdm import tqdm\r\nnum_shards = 60\r\nfor index in tqdm(range(num_shards)):\r\n ds.shard(num_shards=num_shards, index=index, contiguous=True).to_parquet(f\"{index:05d}.parquet\")\r\n````" ]
2023-06-13T05:46:46
2023-07-24T11:56:40
2023-07-24T11:56:40
NONE
null
null
null
### Describe the bug Trying to upload a dataset of hundreds of thousands of audio samples (the total volume is not very large, 60 GB) to the hub with push_to_hub doesn't work. From time to time one piece of the data (a parquet shard) gets pushed and then I get RemoteDisconnected even though my internet is stable. Please help. I've been trying to upload the dataset for almost a week. Thanks ### Steps to reproduce the bug not relevant ### Expected behavior Be able to upload the dataset ### Environment info python: 3.9
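A hedged sketch combining the mitigations from the comments (re-running resumes the upload) with smaller shards, assuming a recent `datasets` version that supports `max_shard_size`; the repo name and local path are placeholders.

```python
from datasets import load_from_disk

ds = load_from_disk("path/to/local/dataset")  # hypothetical local copy of the dataset

# re-running push_to_hub resumes from already-uploaded shards; smaller shards make
# each upload request shorter and less likely to die with RemoteDisconnected
ds.push_to_hub("username/my-audio-dataset", max_shard_size="200MB")
```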
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5945/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5945/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5941
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5941/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5941/comments
https://api.github.com/repos/huggingface/datasets/issues/5941/events
https://github.com/huggingface/datasets/issues/5941
1,751,838,897
I_kwDODunzps5oavCx
5,941
Load Data Sets Too Slow In Train Seq2seq Model
{ "login": "xyx361100238", "id": 19569322, "node_id": "MDQ6VXNlcjE5NTY5MzIy", "avatar_url": "https://avatars.githubusercontent.com/u/19569322?v=4", "gravatar_id": "", "url": "https://api.github.com/users/xyx361100238", "html_url": "https://github.com/xyx361100238", "followers_url": "https://api.github.com/users/xyx361100238/followers", "following_url": "https://api.github.com/users/xyx361100238/following{/other_user}", "gists_url": "https://api.github.com/users/xyx361100238/gists{/gist_id}", "starred_url": "https://api.github.com/users/xyx361100238/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/xyx361100238/subscriptions", "organizations_url": "https://api.github.com/users/xyx361100238/orgs", "repos_url": "https://api.github.com/users/xyx361100238/repos", "events_url": "https://api.github.com/users/xyx361100238/events{/privacy}", "received_events_url": "https://api.github.com/users/xyx361100238/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi ! you can speed it up using multiprocessing by passing `num_proc=` to `load_dataset()`", "already did,but not useful for step Generating train split,it works in step \"Resolving data files\" & \"Downloading data files\" ", "@mariosasko some advice , thanks!", "I met the same problem, terrible experience", "@mariosasko ", "We need more info about the issue to provide help. \r\n\r\nCan you interrupt the process (with `num_proc=None`) after the `load_dataset` call when the slowdown occurs? So we can know what part of the code is causing it.\r\n\r\nThe `audiofolder` \\ `imagefolder` with metadata is not performant for large datasets. Luckily, we can make them much faster if drop the nested metadata files feature (not that useful). I plan to work on this soon.\r\n\r\nIn the meantime, it's better to use `Dataset.from_generator` (requires replacing the `load_dataset` calls in the transformers script with `Dataset.from_generator`) or write a dataset loading script for large datasets.", "Can you interrupt the process (with num_proc=None) after the load_dataset call when the slowdown occurs? So we can know what part of the code is causing it.\r\n(I'll try this operation)\r\nThe audiofolder \\ imagefolder with metadata is not performant for large datasets. Luckily, we can make them much faster if drop the nested metadata files feature (not that useful). I plan to work on this soon.\r\n(My data is indeed a bit large, exceeding 10000 hours of audio data. Looking forward to your improvement work very much)\r\n\r\nIn the meantime, it's better to use Dataset.from_generator (requires replacing the load_dataset calls in the transformers script with Dataset.from_generator) or write a dataset loading script for large datasets.\r\n(I want to use Dataset.from_generator instead of load_dataset ,where can i found sample code to load audio&label dataset, I was to do asr task)", "Can you interrupt the process (with num_proc=None) after the load_dataset call when the slowdown occurs? So we can know what part of the code is causing it.\r\n================================================================================\r\nHere is the log:\r\n[load_dataset.log](https://github.com/huggingface/datasets/files/12169362/load_dataset.log)\r\n(The larger my training data, the slower it loads)\r\n![image](https://github.com/huggingface/datasets/assets/19569322/381b73e4-0a54-4240-b95e-cb8164584047)\r\n\r\n", "In the meantime, it's better to use Dataset.from_generator (requires replacing the load_dataset calls in the transformers script with Dataset.from_generator) or write a dataset loading script for large datasets.\r\n================================================================================\r\nI tried ‘Dataset. from_generator’ implements data loading, but the testing results show no improvement", "I have already solved this problem, referring to #5990 : read audio frist, then use data_generator to change format ." ]
2023-06-12T03:58:43
2023-08-15T02:52:22
2023-08-15T02:52:22
NONE
null
null
null
### Describe the bug The step 'Generating train split' in load_dataset is too slow: ![image](https://github.com/huggingface/datasets/assets/19569322/d9b08eee-95fe-4741-a346-b70416c948f8) ### Steps to reproduce the bug Data: own data, 16 kHz 16-bit mono wav Official Script: [run_speech_recognition_seq2seq.py](https://github.com/huggingface/transformers/blob/main/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py) Added Code: if data_args.data_path is not None: print(data_args.data_path) raw_datasets = load_dataset("audiofolder", data_dir=data_args.data_path, cache_dir=model_args.cache_dir) raw_datasets = raw_datasets.cast_column("audio", Audio(sampling_rate=16000)) raw_datasets = raw_datasets["train"].train_test_split(test_size=0.005, shuffle=True) (change cache_dir to another path, e.g. /DATA/cache) ### Expected behavior Load data fast, at least 1000+ `Generating train split: 387875 examples [32:24:45, 1154.83 examples/s]` ### Environment info - `transformers` version: 4.28.0.dev0 - Platform: Linux-5.4.0-149-generic-x86_64-with-debian-bullseye-sid - Python version: 3.7.16 - Huggingface_hub version: 0.13.2 - PyTorch version (GPU?): 1.13.1+cu116 (True) - Tensorflow version (GPU?): not installed (NA) - Flax version (CPU?/GPU?/TPU?): not installed (NA) - Jax version: not installed - JaxLib version: not installed - Using GPU in script?: <fill in> - Using distributed or parallel set-up in script?: <fill in>
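A hedged sketch of the `Dataset.from_generator` route mentioned in the comments above (which the reporter later combined with reading the audio first); the tab-separated manifest format and file names are assumptions, not part of the original script.

```python
from datasets import Dataset, Audio

def examples():
    # hypothetical manifest: one "<wav path>\t<transcription>" pair per line
    with open("manifest.tsv", encoding="utf-8") as f:
        for line in f:
            path, text = line.rstrip("\n").split("\t")
            yield {"audio": path, "sentence": text}

ds = Dataset.from_generator(examples)
ds = ds.cast_column("audio", Audio(sampling_rate=16000))  # decoded lazily at 16 kHz
raw_datasets = ds.train_test_split(test_size=0.005, shuffle=True)
```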
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5941/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5941/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5990
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5990/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5990/comments
https://api.github.com/repos/huggingface/datasets/issues/5990/events
https://github.com/huggingface/datasets/issues/5990
1,774,389,854
I_kwDODunzps5pwwpe
5,990
Pushing a large dataset on the hub consistently hangs
{ "login": "AntreasAntoniou", "id": 10792502, "node_id": "MDQ6VXNlcjEwNzkyNTAy", "avatar_url": "https://avatars.githubusercontent.com/u/10792502?v=4", "gravatar_id": "", "url": "https://api.github.com/users/AntreasAntoniou", "html_url": "https://github.com/AntreasAntoniou", "followers_url": "https://api.github.com/users/AntreasAntoniou/followers", "following_url": "https://api.github.com/users/AntreasAntoniou/following{/other_user}", "gists_url": "https://api.github.com/users/AntreasAntoniou/gists{/gist_id}", "starred_url": "https://api.github.com/users/AntreasAntoniou/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/AntreasAntoniou/subscriptions", "organizations_url": "https://api.github.com/users/AntreasAntoniou/orgs", "repos_url": "https://api.github.com/users/AntreasAntoniou/repos", "events_url": "https://api.github.com/users/AntreasAntoniou/events{/privacy}", "received_events_url": "https://api.github.com/users/AntreasAntoniou/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[ "Hi @AntreasAntoniou , sorry to know you are facing this issue. To help debugging it, could you tell me:\r\n- What is the total dataset size?\r\n- Is it always failing on the same shard or is the hanging problem happening randomly?\r\n- Were you able to save the dataset as parquet locally? This would help us determine if the problem comes from the upload or the file generation.\r\n\r\nI'm cc-ing @lhoestq who might have some insights from a `datasets` perspective.", "One trick that can also help is to check the traceback when you kill your python process: it will show where in the code it was hanging", "Right. So I did the trick @lhoestq suggested. Here is where things seem to hang\r\n\r\n```\r\nError while uploading 'data/train-00120-of-00195-466c2dbab2eb9989.parquet' to the Hub. \r\nPushing split train to the Hub. \r\nCreating parquet from Arrow format: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:03<00:00, 1.15s/ba]\r\nUpload 1 LFS files: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:52<00:00, 52.12s/it]\r\nCreating parquet from Arrow format: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:03<00:00, 1.08s/ba]\r\nUpload 1 LFS files: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:45<00:00, 45.54s/it]\r\nCreating parquet from Arrow format: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:03<00:00, 1.08s/ba]\r\nCreating parquet from Arrow format: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:03<00:00, 1.03s/ba^Upload 1 LFS files: 0%| | 0/1 [\r\n21:27:35<?, ?it/s] \r\nPushing dataset shards to the dataset hub: 63%|█████████████████████████████████████████████████████████████▎ | 122/195 [23:37:11<14:07:59, 696.98s/it]\r\n^CError in sys.excepthook: \r\nTraceback (most recent call last): \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/console.py\", line 1699, in print \r\n extend(render(renderable, render_options)) \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/console.py\", line 1335, in render \r\n yield from self.render(render_output, _options) \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/console.py\", line 1331, in render \r\n for render_output in iter_render: \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/constrain.py\", line 29, in __rich_console__ \r\n yield from console.render(self.renderable, child_options) \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/console.py\", line 1331, in render \r\n for render_output in iter_render: \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/panel.py\", line 220, in __rich_console__ \r\n lines = console.render_lines(renderable, child_options, style=style) \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/console.py\", line 1371, in render_lines \r\n lines = list( \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/segment.py\", line 292, in split_and_crop_lines \r\n for segment in segments: \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/console.py\", line 
1331, in render \r\n for render_output in iter_render: \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/padding.py\", line 97, in __rich_console__ \r\n lines = console.render_lines( \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/console.py\", line 1371, in render_lines \r\n lines = list( \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/segment.py\", line 292, in split_and_crop_lines \r\n for segment in segments: \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/console.py\", line 1335, in render \r\n yield from self.render(render_output, _options) \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/console.py\", line 1331, in render \r\n for render_output in iter_render: \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/syntax.py\", line 611, in __rich_console__ \r\n segments = Segments(self._get_syntax(console, options)) \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/segment.py\", line 668, in __init__ \r\n self.segments = list(segments) \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/syntax.py\", line 674, in _get_syntax \r\n lines: Union[List[Text], Lines] = text.split(\"\\n\", allow_blank=ends_on_nl) \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/text.py\", line 1042, in split \r\n lines = Lines( \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/containers.py\", line 70, in __init__ \r\n self._lines: List[\"Text\"] = list(lines) \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/text.py\", line 1043, in <genexpr> \r\n line for line in self.divide(flatten_spans()) if line.plain != separator \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/rich/text.py\", line 385, in plain \r\n if len(self._text) != 1: \r\nKeyboardInterrupt \r\n \r\nOriginal exception was: \r\nTraceback (most recent call last): \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/tqdm/contrib/concurrent.py\", line 51, in _executor_map \r\n return list(tqdm_class(ex.map(fn, *iterables, chunksize=chunksize), **kwargs)) \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/tqdm/std.py\", line 1178, in __iter__ \r\n for obj in iterable: \r\n File \"/opt/conda/envs/main/lib/python3.10/concurrent/futures/_base.py\", line 621, in result_iterator \r\n yield _result_or_cancel(fs.pop()) \r\n File \"/opt/conda/envs/main/lib/python3.10/concurrent/futures/_base.py\", line 319, in _result_or_cancel \r\n return fut.result(timeout) \r\n File \"/opt/conda/envs/main/lib/python3.10/concurrent/futures/_base.py\", line 453, in result \r\n self._condition.wait(timeout) \r\n File \"/opt/conda/envs/main/lib/python3.10/threading.py\", line 320, in wait \r\n waiter.acquire() \r\nKeyboardInterrupt \r\n \r\nDuring handling of the above exception, another exception occurred: \r\n \r\nTraceback (most recent call last): \r\n File \"/TALI/tali/scripts/validate_dataset.py\", line 127, in <module> \r\n train_dataset.push_to_hub(repo_id=\"Antreas/TALI-base\", max_shard_size=\"5GB\") \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/datasets/dataset_dict.py\", line 1583, in push_to_hub \r\n repo_id, split, uploaded_size, dataset_nbytes, _, _ = self[split]._push_parquet_shards_to_hub( \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/datasets/arrow_dataset.py\", line 5275, in _push_parquet_shards_to_hub \r\n _retry( \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/datasets/utils/file_utils.py\", 
line 282, in _retry \r\n return func(*func_args, **func_kwargs) \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn \r\n return fn(*args, **kwargs) \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/huggingface_hub/hf_api.py\", line 826, in _inner \r\n return fn(self, *args, **kwargs) \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/huggingface_hub/hf_api.py\", line 3205, in upload_file \r\n commit_info = self.create_commit( \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn \r\n return fn(*args, **kwargs) \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/huggingface_hub/hf_api.py\", line 826, in _inner \r\n return fn(self, *args, **kwargs) \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/huggingface_hub/hf_api.py\", line 2680, in create_commit \r\n upload_lfs_files( \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py\", line 118, in _inner_fn \r\n return fn(*args, **kwargs) \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/huggingface_hub/_commit_api.py\", line 353, in upload_lfs_files \r\n thread_map( \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/tqdm/contrib/concurrent.py\", line 69, in thread_map \r\n return _executor_map(ThreadPoolExecutor, fn, *iterables, **tqdm_kwargs) \r\n File \"/opt/conda/envs/main/lib/python3.10/site-packages/tqdm/contrib/concurrent.py\", line 49, in _executor_map \r\n with PoolExecutor(max_workers=max_workers, initializer=tqdm_class.set_lock, \r\n File \"/opt/conda/envs/main/lib/python3.10/concurrent/futures/_base.py\", line 649, in __exit__ \r\n self.shutdown(wait=True) \r\n File \"/opt/conda/envs/main/lib/python3.10/concurrent/futures/thread.py\", line 235, in shutdown \r\n t.join() \r\n File \"/opt/conda/envs/main/lib/python3.10/threading.py\", line 1096, in join \r\n self._wait_for_tstate_lock() \r\n File \"/opt/conda/envs/main/lib/python3.10/threading.py\", line 1116, in _wait_for_tstate_lock \r\n if lock.acquire(block, timeout): \r\nKeyboardInterrupt \r\n```", "@Wauplin \r\n\r\n>What is the total dataset size?\r\n\r\nThere are three variants, and the random hanging happens on all three. The sizes are 2TB, 1TB, and 200GB. \r\n\r\n>Is it always failing on the same shard or is the hanging problem happening randomly?\r\n\r\nIt seems to be very much random, as restarting can help move past the previous hang, only to find a new one, or not. \r\n\r\n>Were you able to save the dataset as parquet locally? This would help us determine if the problem comes from the upload or the file generation.\r\n\r\nYes. The dataset seems to be locally stored as parquet. ", "Hmm it looks like an issue with TQDM lock. Maybe you can try updating TQDM ?", "I am using the latest version of tqdm\r\n\r\n```\r\n⬢ [Docker] ❯ pip install tqdm --upgrade\r\nRequirement already satisfied: tqdm in /opt/conda/envs/main/lib/python3.10/site-packages (4.65.0)\r\nWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\r\n```", "I tried trying to catch the hanging issue in action again\r\n\r\n```\r\nPushing dataset shards to the dataset hub: 65%|█████████████████████████████████████████████████████████████████▊ | 127/195 [2:28:02<1:19:15, 69.94s/it] \r\nError while uploading 'data/train-00127-of-00195-3f8d036ade107c27.parquet' to the Hub. \r\nPushing split train to the Hub. \r\nPushing dataset shards to the dataset hub: 64%|████████████████████████████████████████████████████████████████▏ | 124/195 [2:06:10<1:12:14, 61.05s/it]C^[^C^C^C \r\n╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮ \r\n│ /TALI/tali/scripts/validate_dataset.py:127 in <module> │ \r\n│ │ \r\n│ 124 │ │ \r\n│ 125 │ while not succesful_competion: │ \r\n│ 126 │ │ try: │ \r\n│ ❱ 127 │ │ │ train_dataset.push_to_hub(repo_id=\"Antreas/TALI-base\", max_shard_size=\"5GB\") │ \r\n│ 128 │ │ │ succesful_competion = True │ \r\n│ 129 │ │ except Exception as e: │ \r\n│ 130 │ │ │ print(e) │ \r\n│ │ \r\n│ /opt/conda/envs/main/lib/python3.10/site-packages/datasets/dataset_dict.py:1583 in push_to_hub │ \r\n│ │ \r\n│ 1580 │ │ for split in self.keys(): │ \r\n│ 1581 │ │ │ logger.warning(f\"Pushing split {split} to the Hub.\") │ \r\n│ 1582 │ │ │ # The split=key needs to be removed before merging │ \r\n│ ❱ 1583 │ │ │ repo_id, split, uploaded_size, dataset_nbytes, _, _ = self[split]._push_parq │ \r\n│ 1584 │ │ │ │ repo_id, │ \r\n│ 1585 │ │ │ │ split=split, │ \r\n│ 1586 │ │ │ │ private=private, │ \r\n│ │ \r\n│ /opt/conda/envs/main/lib/python3.10/site-packages/datasets/arrow_dataset.py:5263 in │ \r\n│ _push_parquet_shards_to_hub │ \r\n│ │ \r\n│ 5260 │ │ │ \r\n│ 5261 │ │ uploaded_size = 0 │ \r\n│ 5262 │ │ shards_path_in_repo = [] │ \r\n│ ❱ 5263 │ │ for index, shard in logging.tqdm( │ \r\n│ 5264 │ │ │ enumerate(itertools.chain([first_shard], shards_iter)), │ \r\n│ 5265 │ │ │ desc=\"Pushing dataset shards to the dataset hub\", │ \r\n│ 5266 │ │ │ total=num_shards, │ \r\n│ │ \r\n│ /opt/conda/envs/main/lib/python3.10/site-packages/tqdm/std.py:1178 in __iter__ │ \r\n│ │ \r\n│ 1175 │ │ time = self._time │ \r\n│ 1176 │ │ │ \r\n│ 1177 │ │ try: │\r\n│ ❱ 1178 │ │ │ for obj in iterable: │\r\n│ 1179 │ │ │ │ yield obj │\r\n│ 1180 │ │ │ │ # Update and possibly print the progressbar. │\r\n│ 1181 │ │ │ │ # Note: does not call self.update(1) for speed optimisation. 
│\r\n│ │\r\n│ /opt/conda/envs/main/lib/python3.10/site-packages/datasets/arrow_dataset.py:5238 in │\r\n│ shards_with_embedded_external_files │\r\n│ │\r\n│ 5235 │ │ │ │ for shard in shards: │\r\n│ 5236 │ │ │ │ │ format = shard.format │\r\n│ 5237 │ │ │ │ │ shard = shard.with_format(\"arrow\") │\r\n│ ❱ 5238 │ │ │ │ │ shard = shard.map( │\r\n│ 5239 │ │ │ │ │ │ embed_table_storage, │\r\n│ 5240 │ │ │ │ │ │ batched=True, │\r\n│ 5241 │ │ │ │ │ │ batch_size=1000, │\r\n│ │\r\n│ /opt/conda/envs/main/lib/python3.10/site-packages/datasets/arrow_dataset.py:578 in wrapper │\r\n│ │\r\n│ 575 │ │ else: │\r\n│ 576 │ │ │ self: \"Dataset\" = kwargs.pop(\"self\") │\r\n│ 577 │ │ # apply actual function │\r\n│ ❱ 578 │ │ out: Union[\"Dataset\", \"DatasetDict\"] = func(self, *args, **kwargs) │ \r\n│ 579 │ │ datasets: List[\"Dataset\"] = list(out.values()) if isinstance(out, dict) else [ou │ \r\n│ 580 │ │ for dataset in datasets: │ \r\n│ 581 │ │ │ # Remove task templates if a column mapping of the template is no longer val │ \r\n│ │ \r\n│ /opt/conda/envs/main/lib/python3.10/site-packages/datasets/arrow_dataset.py:543 in wrapper │ \r\n│ │ \r\n│ 540 │ │ │ \"output_all_columns\": self._output_all_columns, │ \r\n│ 541 │ │ } │ \r\n│ 542 │ │ # apply actual function │ \r\n│ ❱ 543 │ │ out: Union[\"Dataset\", \"DatasetDict\"] = func(self, *args, **kwargs) │ \r\n│ 544 │ │ datasets: List[\"Dataset\"] = list(out.values()) if isinstance(out, dict) else [ou │ \r\n│ 545 │ │ # re-apply format to the output │ \r\n│ 546 │ │ for dataset in datasets: │ \r\n│ │ \r\n│ /opt/conda/envs/main/lib/python3.10/site-packages/datasets/arrow_dataset.py:3073 in map │ \r\n│ │ \r\n│ 3070 │ │ │ │ │ leave=False, │ \r\n│ 3071 │ │ │ │ │ desc=desc or \"Map\", │ \r\n│ 3072 │ │ │ │ ) as pbar: │ \r\n│ ❱ 3073 │ │ │ │ │ for rank, done, content in Dataset._map_single(**dataset_kwargs): │ \r\n│ 3074 │ │ │ │ │ │ if done: │ \r\n│ 3075 │ │ │ │ │ │ │ shards_done += 1 │ \r\n│ 3076 │ │ │ │ │ │ │ logger.debug(f\"Finished processing shard number {rank} of {n │ \r\n│ │ \r\n│ /opt/conda/envs/main/lib/python3.10/site-packages/datasets/arrow_dataset.py:3464 in _map_single │ \r\n│ │ \r\n│ 3461 │ │ │ │ │ │ │ │ buf_writer, writer, tmp_file = init_buffer_and_writer() │ \r\n│ 3462 │ │ │ │ │ │ │ │ stack.enter_context(writer) │ \r\n│ 3463 │ │ │ │ │ │ │ if isinstance(batch, pa.Table): │ \r\n│ ❱ 3464 │ │ │ │ │ │ │ │ writer.write_table(batch) │ \r\n│ 3465 │ │ │ │ │ │ │ else: │ \r\n│ 3466 │ │ │ │ │ │ │ │ writer.write_batch(batch) │ \r\n│ 3467 │ │ │ │ │ │ num_examples_progress_update += num_examples_in_batch │ \r\n│ │ \r\n│ /opt/conda/envs/main/lib/python3.10/site-packages/datasets/arrow_writer.py:567 in write_table │ \r\n│ │ \r\n│ 564 │ │ │ writer_batch_size = self.writer_batch_size │ \r\n│ 565 │ │ if self.pa_writer is None: │ \r\n│ 566 │ │ │ self._build_writer(inferred_schema=pa_table.schema) │ \r\n│ ❱ 567 │ │ pa_table = pa_table.combine_chunks() │ \r\n│ 568 │ │ pa_table = table_cast(pa_table, self._schema) │ \r\n│ 569 │ │ if self.embed_local_files: │ \r\n│ 570 │ │ │ pa_table = embed_table_storage(pa_table) │ \r\n╰──────────────────────────────────────────────────────────────────────────────────────────────────╯ \r\nKeyboardInterrupt \r\n```", "I'm on my phone so can't help that much. What I'd advice to do is to [save_to_disk](https://huggingface.co/docs/datasets/package_reference/main_classes#save_to_disk) if it's not already done and then upload the files/folder to the Hub separately. 
You can find what you need in the [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload). It might not help finding the exact issue for now but at least it can unblock you. ", "In your last stacktrace it interrupted while embedding external content - in case your dataset in made of images or audio files that live on your disk. Is it the case ?", "Yeah, the dataset has images, audio, video and text. ", "It's maybe related to https://github.com/apache/arrow/issues/34455: are you using ArrayND features ?\r\n\r\nAlso what's your `pyarrow` version ? Could you try updating to >= 12.0.1 ?", "I was using pyarrow == 12.0.0\r\n\r\nI am not explicitly using ArrayND features, unless the hub API automatically converts my files to such. ", "I have now updated to pyarrow == 12.0.1 and retrying", "You can also try to reduce the `max_shard_size` - Sometimes parquet has a hard time working with data bigger than 2GB", "So, updating the pyarrow seems to help. It can still throw errors here and there but I can retry when that happens. It's better than hanging. \r\n\r\nHowever, I am a bit confused about something. I have uploaded my datasets, but while earlier I could see all three sets, now I can only see 1. What's going on? \r\nhttps://huggingface.co/datasets/Antreas/TALI-base\r\n\r\nI have seen this happen before as well, so I deleted and reuploaded, but this dataset is way too large for me to do this. ", "It's a bug on our side, I'll update the dataset viewer ;)\r\n\r\nThanks for reporting !", "Apparently this happened because of bad modifications in the README.md split metadata.\r\n\r\nI fixed them in this PR: https://huggingface.co/datasets/Antreas/TALI-base/discussions/1", "@lhoestq It's a bit odd that when uploading a dataset, one set at a time \"train\", \"val\", \"test\", the push_to_hub function overwrites the readme and removes differently named sets from previous commits. i.e., you push \"val\", all is well. Then you push \"test\", and the \"val\" entry disappears from the readme, while the data remain intact. ", "Also, just found another related issue. One of the many that make things hang or fail when pushing to hub. 
\r\n\r\nIn the following code:\r\n\r\n```python\r\ntrain_generator = lambda: data_generator(\"train\", percentage=1.0)\r\n val_generator = lambda: data_generator(\"val\")\r\n test_generator = lambda: data_generator(\"test\")\r\n\r\n train_data = datasets.Dataset.from_generator(\r\n train_generator,\r\n num_proc=mp.cpu_count(),\r\n writer_batch_size=5000,\r\n cache_dir=tali_dataset_dir,\r\n )\r\n\r\n val_data = datasets.Dataset.from_generator(\r\n val_generator,\r\n writer_batch_size=5000,\r\n num_proc=mp.cpu_count(),\r\n cache_dir=tali_dataset_dir,\r\n )\r\n\r\n test_data = datasets.Dataset.from_generator(\r\n test_generator,\r\n writer_batch_size=5000,\r\n num_proc=mp.cpu_count(),\r\n cache_dir=tali_dataset_dir,\r\n )\r\n\r\n print(f\"Pushing TALI-large to hub\")\r\n\r\n dataset = datasets.DatasetDict(\r\n {\"train\": train_data, \"val\": val_data, \"test\": test_data}\r\n )\r\n succesful_competion = False\r\n\r\n while not succesful_competion:\r\n try:\r\n dataset.push_to_hub(repo_id=\"Antreas/TALI-large\", max_shard_size=\"2GB\")\r\n succesful_competion = True\r\n except Exception as e:\r\n print(e)\r\n ```\r\n \r\n \r\n Things keep failing in the push_to_repo step, at random places, with the following error:\r\n \r\n ```bash\r\n Pushing dataset shards to the dataset hub: 7%|██████████▋ | 67/950 [42:41<9:22:37, 38.23s/it]\r\nError while uploading 'data/train-00067-of-00950-a4d179ed5a593486.parquet' to the Hub.\r\nPushing split train to the Hub.\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:01<00:00, 1.81ba/s]\r\nUpload 1 LFS files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:11<00:00, 11.20s/it]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.48ba/s]\r\nUpload 1 LFS files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:15<00:00, 15.30s/it]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.39ba/s]\r\nUpload 1 LFS files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:11<00:00, 11.52s/it]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.47ba/s]\r\nUpload 1 LFS files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:10<00:00, 10.39s/it]\r\nCreating parquet from Arrow format: 
100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.26ba/s]\r\nUpload 1 LFS files: 0%| | 0/1 [16:38<?, ?it/s]\r\nPushing dataset shards to the dataset hub: 7%|███████████▎ | 71/950 [44:37<9:12:28, 37.71s/it]\r\nError while uploading 'data/train-00071-of-00950-72bab6e5cb223aee.parquet' to the Hub.\r\nPushing split train to the Hub.\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.18ba/s]\r\nUpload 1 LFS files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:10<00:00, 10.94s/it]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.36ba/s]\r\nUpload 1 LFS files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:10<00:00, 10.67s/it]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.57ba/s]\r\nUpload 1 LFS files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:10<00:00, 10.16s/it]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.68ba/s]\r\nUpload 1 LFS files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:09<00:00, 9.63s/it]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.36ba/s]\r\nUpload 1 LFS files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:10<00:00, 10.67s/it]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.37ba/s]\r\nUpload 1 LFS files: 0%| | 0/1 [16:39<?, ?it/s]\r\nPushing dataset shards to the dataset hub: 8%|████████████ | 76/950 [46:21<8:53:08, 36.60s/it]\r\nError while uploading 'data/train-00076-of-00950-b90e4e3b433db179.parquet' to the Hub.\r\nPushing split train to the Hub.\r\nCreating parquet from Arrow format: 
100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.21ba/s]\r\nUpload 1 LFS files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:25<00:00, 25.40s/it]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:01<00:00, 1.56ba/s]\r\nUpload 1 LFS files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:10<00:00, 10.40s/it]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.49ba/s]\r\nUpload 1 LFS files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:23<00:00, 23.53s/it]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.27ba/s]\r\nUpload 1 LFS files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:10<00:00, 10.25s/it]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.42ba/s]\r\nUpload 1 LFS files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:11<00:00, 11.03s/it]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.39ba/s]\r\nUpload 1 LFS files: 0%| | 0/1 [16:39<?, ?it/s]\r\nPushing dataset shards to the dataset hub: 9%|████████████▊ | 81/950 [48:30<8:40:22, 35.93s/it]\r\nError while uploading 'data/train-00081-of-00950-84b0450a1df093a9.parquet' to the Hub.\r\nPushing split train to the Hub.\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.18ba/s]\r\nUpload 1 LFS files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:11<00:00, 11.65s/it]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:01<00:00, 
1.92ba/s]\r\nUpload 1 LFS files: 0%| | 0/1 [16:38<?, ?it/s]\r\nPushing dataset shards to the dataset hub: 9%|█████████████ | 82/950 [48:55<8:37:57, 35.80s/it]\r\nError while uploading 'data/train-00082-of-00950-0a1f52da35653e08.parquet' to the Hub.\r\nPushing split train to the Hub.\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.31ba/s]\r\nUpload 1 LFS files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:26<00:00, 26.29s/it]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.42ba/s]\r\nUpload 1 LFS files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:10<00:00, 10.57s/it]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.64ba/s]\r\nUpload 1 LFS files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:10<00:00, 10.35s/it]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.64ba/s]\r\nUpload 1 LFS files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:11<00:00, 11.74s/it]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 2.31ba/s]\r\nUpload 1 LFS files: 0%| | 0/1 [16:40<?, ?it/s]\r\nPushing dataset shards to the dataset hub: 9%|█████████████▋ | 86/950 [50:48<8:30:25, 35.45s/it]\r\nError while uploading 'data/train-00086-of-00950-e1cc80dd17191b20.parquet' to the Hub.\r\n```\r\n\r\nI have a while loop that forces retries, but it seems that the progress itself is randomly getting lost as well. Any ideas on how to improve this? It has been blocking me for way too long. \r\n\r\nShould I build the parquet manually and then push manually as well? If I do things manually, how can I ensure my dataset works properly with \"stream=True\"? \r\n\r\nThank you for your help and time. ", "> @lhoestq It's a bit odd that when uploading a dataset, one set at a time \"train\", \"val\", \"test\", the push_to_hub function overwrites the readme and removes differently named sets from previous commits. i.e., you push \"val\", all is well. Then you push \"test\", and the \"val\" entry disappears from the readme, while the data remain intact.\r\n\r\nHmm this shouldn't happen. What code did you run exactly ? 
Using which version of `datasets` ?", "> I have a while loop that forces retries, but it seems that the progress itself is randomly getting lost as well. Any ideas on how to improve this? It has been blocking me for way too long.\r\n\r\nCould you also print the cause of the error (`e.__cause__`) ? Or show the full stack trace when the error happens ?\r\nThis would give more details about why it failed and would help investigate.", "> Should I build the parquet manually and then push manually as well? If I do things manually, how can I ensure my dataset works properly with \"stream=True\"?\r\n\r\nParquet is supported out of the box ^^\r\n\r\nIf you want to make sure it works as expected you can try locally first:\r\n```python\r\nds = load_dataset(\"path/to/local\", streaming=True)\r\n```", "@lhoestq @AntreasAntoniou I transferred this issue to the `datasets` repository as the questions and answers are more related to this repo. Hope it can help other users find the bug and fixes more easily (like updating [tqdm](https://github.com/huggingface/datasets/issues/5990#issuecomment-1607120204) and [pyarrow](https://github.com/huggingface/datasets/issues/5990#issuecomment-1607120278) or [setting a lower `max_shard_size`](https://github.com/huggingface/datasets/issues/5990#issuecomment-1607120328)).\r\n\r\n~For the initial \"pushing large dataset consistently hangs\"-issue, I still think it's best to try to `save_to_disk` first and then upload it manually/with a script (see [upload_folder](https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-folder)). It's not the most satisfying solution but at least it would confirm from where the problem comes from.~\r\n\r\n**EDIT:** removed suggestion about saving to disk first (see https://github.com/huggingface/datasets/issues/5990#issuecomment-1607186914).", "> @lhoestq @AntreasAntoniou I transferred this issue to the datasets repository as the questions and answers are more related to this repo. Hope it can help other users find the bug and fixes more easily (like updating https://github.com/huggingface/datasets/issues/5990#issuecomment-1607120204 and https://github.com/huggingface/datasets/issues/5990#issuecomment-1607120278 or https://github.com/huggingface/datasets/issues/5990#issuecomment-1607120328).\r\n\r\nthanks :)\r\n\r\n> For the initial \"pushing large dataset consistently hangs\"-issue, I still think it's best to try to save_to_disk first and then upload it manually/with a script (see [upload_folder](https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-folder)). It's not the most satisfying solution but at least it would confirm from where the problem comes from.\r\n\r\nAs I've already said in other discussions, I would not recommend pushing files saved with `save_to_disk` to the Hub but save to parquet shards and upload them instead. The Hub does not support datasets saved with `save_to_disk`, which is meant for disk only.", "> As I've already said in other discussions, I would not recommend pushing files saved with save_to_disk to the Hub but save to parquet shards and upload them instead. The Hub does not support datasets saved with save_to_disk, which is meant for disk only.\r\n\r\nWell noted, thanks. That part was not clear to me :)", "Sorry for not replying in a few days, I was on leave. 
:) \r\n\r\nSo, here are more information as to the error that causes some of the delay\r\n\r\n```bash\r\nPushing Antreas/TALI-tiny to hub\r\nAttempting to push to hub\r\nPushing split train to the Hub.\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:24<00:00, 4.06s/ba]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:24<00:00, 4.15s/ba]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:26<00:00, 4.45s/ba]\r\n/opt/conda/envs/main/lib/python3.10/site-packages/huggingface_hub/lfs.py:310: UserWarning: hf_transfer is enabled but does not support uploading from bytes or BinaryIO, falling back to regular upload\r\n warnings.warn(\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:25<00:00, 4.26s/ba]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:27<00:00, 4.58s/ba]\r\nCreating parquet from Arrow format: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 6/6 [00:24<00:00, 4.10s/ba]\r\nPushing dataset shards to the dataset hub: 22%|████████████████████████▎ | 5/23 [52:23<3:08:37, 628.74s/it]\r\nException: Error while uploading 'data/train-00005-of-00023-e224d901fd65e062.parquet' to the Hub., with stacktrace: <traceback object at 0x7f745458d0c0>, and type: <class 'RuntimeError'>, and \r\ncause: HTTPSConnectionPool(host='s3.us-east-1.amazonaws.com', port=443): Max retries exceeded with url: \r\n/lfs.huggingface.co/repos/7c/d3/7cd385d9324302dc13e3986331d72d9be6fa0174c63dcfe0e08cd474f7f1e8b7/3415166ae28c0beccbbc692f38742b8dea2c197f5c805321104e888d21d7eb90?X-Amz-Algorithm=AWS4-HMAC-SHA256\r\n&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=AKIA4N7VTDGO27GPWFUO%2F20230627%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20230627T003349Z&X-Amz-Expires=86400&X-Amz-Signature=5a12ff96f2\r\n91f644134170992a6628e5f3c4e7b2e7fc3e940b4378fe11ae5390&X-Amz-SignedHeaders=host&partNumber=1&uploadId=JSsK8r63XSF.VlKQx3Vf8OW4DEVp5YIIY7LPnuapNIegsxs5EHgM1p4u0.Nn6_wlPlQnvxm8HKMxZhczKE9KB74t0etB\r\noLcxqBIvsgey3uXBTZMAEGwU6y7CDUADiEIO&x-id=UploadPart (Caused by SSLError(SSLEOFError(8, 'EOF occurred in violation of protocol (_ssl.c:2426)')))\r\nPush failed, retrying\r\nAttempting to push to hub\r\nPushing split train to the Hub.\r\n```\r\n\r\nOne issue is that the uploading does not continue from the chunk it failed off. It often continues from a very old chunk. e.g. if it failed on chunk 192/250, it will continue from say 53/250, and this behaviour appears almost random. ", "Are you using a proxy of some sort ?", "I am using a kubernetes cluster built into a university VPN. 
", "So, other than the random connection drops here and there, any idea why the progress does not continue where it left off?\r\n\r\n```bash\r\nPushing split train to the Hub.\r\nCreating parquet from Arrow format: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 28/28 [00:02<00:00, 10.79ba/s]\r\nCreating parquet from Arrow format: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 28/28 [00:02<00:00, 13.65ba/s]\r\nCreating parquet from Arrow format: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 28/28 [00:02<00:00, 13.39ba/s]\r\nCreating parquet from Arrow format: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 28/28 [00:02<00:00, 13.04ba/s]\r\nCreating parquet from Arrow format: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 28/28 [00:02<00:00, 13.52ba/s]\r\nCreating parquet from Arrow format: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 28/28 [00:02<00:00, 12.28ba/s]\r\nPushing dataset shards to the dataset hub: 20%|██████████████████████ | 75/381 [1:34:39<6:26:11, 75.72s/it]\r\nException: Error while uploading 'data/train-00075-of-00381-1614bc251b778766.parquet' to the Hub., with stacktrace: <traceback object at 0x7fab6d9a4980>, and type: <class 'RuntimeError'>, and \r\ncause: HTTPSConnectionPool(host='s3.us-east-1.amazonaws.com', port=443): Max retries exceeded with url: \r\n/lfs.huggingface.co/repos/3b/31/3b311464573d8d63b137fcd5b40af1e7a5b1306843c88e80372d0117157504e5/ed8dae933fb79ae1ef5fb1f698f5125d3e1c02977ac69438631f152bb3bfdd1e?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-\r\nAmz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=AKIA4N7VTDGO27GPWFUO%2F20230629%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20230629T053004Z&X-Amz-Expires=86400&X-Amz-Signature=da2b26270edfd6d0\r\nd069c015a5a432031107a8664c3f0917717e5e40c688183c&X-Amz-SignedHeaders=host&partNumber=1&uploadId=2erWGHTh3ICqBLU_QvHfnygZ2tkMWbL0rEqpJdYohCKHUHnfwMjvoBIg0TI_KSGn4rSKxUxOyqSIzFUFSRSzixZeLeneaXJOw.Qx8\r\nzLKSV5xV7HRQDj4RBesNve6cSoo&x-id=UploadPart (Caused by SSLError(SSLEOFError(8, 'EOF occurred in violation of protocol (_ssl.c:2426)')))\r\nPush failed, retrying\r\nAttempting to push to hub\r\nPushing split train to the Hub.\r\nCreating parquet from Arrow format: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 28/28 [00:02<00:00, 12.09ba/s]\r\nCreating parquet from Arrow format: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 28/28 [00:02<00:00, 11.51ba/s]\r\nCreating parquet from Arrow format: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 28/28 [00:02<00:00, 10.77ba/s]\r\nPushing dataset shards to the dataset hub: 20%|██████████████████████▋ | 77/381 [1:32:50<6:06:34, 72.35s/it]\r\nException: Error while uploading 'data/train-00077-of-00381-368b2327a9908aab.parquet' to the Hub., with stacktrace: <traceback object at 0x7fab45b27f80>, and type: <class 'RuntimeError'>, and \r\ncause: 
HTTPSConnectionPool(host='s3.us-east-1.amazonaws.com', port=443): Max retries exceeded with url: \r\n/lfs.huggingface.co/repos/3b/31/3b311464573d8d63b137fcd5b40af1e7a5b1306843c88e80372d0117157504e5/9462ff2c5e61283b53b091984a22de2f41a2f6e37b681171e2eca4a998f979cb?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-\r\nAmz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=AKIA4N7VTDGO27GPWFUO%2F20230629%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20230629T070510Z&X-Amz-Expires=86400&X-Amz-Signature=9ab8487b93d443cd\r\n21f05476405855d46051a0771b4986bbb20f770ded21b1a4&X-Amz-SignedHeaders=host&partNumber=1&uploadId=UiHX1B.DcoAO2QmIHpWpCuNPwhXU_o1dsTkTGPqZt1P51o9k0yz.EsFD9eKpQMwgAST3jOatRG78I_JWRBeLBDYYVNp8r0TpIdeSg\r\neUg8uwPZOCPw9y5mWOw8MWJrnBo&x-id=UploadPart (Caused by SSLError(SSLEOFError(8, 'EOF occurred in violation of protocol (_ssl.c:2426)')))\r\nPush failed, retrying\r\nAttempting to push to hub\r\nPushing split train to the Hub.\r\nPushing dataset shards to the dataset hub: 8%|████████▋ | 29/381 [27:39<5:50:03, 59.67s/it]\r\nMap: 36%|████████████████████████████████████████████████████ | 1000/2764 [00:35<00:34, 51.63 examples/Map: 72%|████████████████████████████████████████████████████████████████████████████████████████████████████████▏ | 2000/2764 [00:40<00:15, 49.06 examples/Map: 72%|████████████████████████████████████████████████████████████████████████████████████████████████████████▏ | 2000/2764 [00:55<00:15, 49.06 examples/Map: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2764/2764 [00:56<00:00, 48.82 examples/Pushing dataset shards to the dataset hub: 8%|████████▉ | 30/381 [28:35<5:43:03, 58.64s/iPushing dataset shards to the dataset hub: 8%|█████████▎ | 31/381 [29:40<5:52:18, 60.40s/iPushing dataset shards to the dataset hub: 8%|█████████▌ | 32/381 [30:46<6:02:20, 62.29s/it] \r\nMap: 36%|███████████████████████████████████████████████████▎ \r\n```\r\n\r\nThis is actually the issue that wastes the most time for me, and I need it fixed. Please advice on how I can go about it.\r\n\r\nNotice how the progress goes from \r\n| 77/381 to 30/381", "If the any shard is missing on the Hub, it will re-upload it. It looks like the 30th shard was missing on the Hub in your case. \r\n\r\nIt also means that the other files up to the 77th that were successfully uploaded won't be uploaded again.\r\n\r\ncc @mariosasko who might know better" ]
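The fallback suggested in this thread, writing parquet shards locally and uploading them with `huggingface_hub` instead of relying on `push_to_hub`, could look roughly like the sketch below. The repo id, shard count and paths are placeholders, and the resume behaviour on re-runs is an assumption rather than something verified here.

```python
# Sketch of the "save parquet shards locally, then upload" fallback discussed above.
# Repo id, number of shards and local paths are hypothetical.
import os

from datasets import load_from_disk  # or use the Dataset object built earlier
from huggingface_hub import HfApi

num_shards = 195
out_dir = "/data/tali_parquet/train"
os.makedirs(out_dir, exist_ok=True)

ds = load_from_disk("/data/tali_arrow")["train"]  # placeholder path
for i in range(num_shards):
    shard = ds.shard(num_shards=num_shards, index=i, contiguous=True)
    shard.to_parquet(f"{out_dir}/train-{i:05d}-of-{num_shards:05d}.parquet")

api = HfApi()
# Re-running upload_folder after a failure should not re-upload files that are
# already identical on the Hub (assumed behaviour).
api.upload_folder(
    folder_path=out_dir,
    path_in_repo="data",
    repo_id="user/my-dataset",
    repo_type="dataset",
)
```

Splitting the two steps this way at least makes it clear whether the hang comes from shard generation or from the upload itself.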
2023-06-10T14:46:47
2024-01-31T00:51:56
null
NONE
null
null
null
### Describe the bug Once I have locally built a large dataset that I want to push to hub, I use the recommended approach of .push_to_hub to get the dataset on the hub, and after pushing a few shards, it consistently hangs. This has happened over 40 times over the past week, and despite my best efforts to try and catch this happening and kill a process and restart, it seems to be extremely time wasting -- so I came to you to report this and to seek help. I already tried installing hf_transfer, but it doesn't support Byte file uploads so I uninstalled it. ### Reproduction ```python import multiprocessing as mp import pathlib from math import ceil import datasets import numpy as np from tqdm.auto import tqdm from tali.data.data import select_subtitles_between_timestamps from tali.utils import load_json tali_dataset_dir = "/data/" if __name__ == "__main__": full_dataset = datasets.load_dataset( "Antreas/TALI", num_proc=mp.cpu_count(), cache_dir=tali_dataset_dir ) def data_generator(set_name, percentage: float = 1.0): dataset = full_dataset[set_name] for item in tqdm(dataset): video_list = item["youtube_content_video"] video_list = np.random.choice( video_list, int(ceil(len(video_list) * percentage)) ) if len(video_list) == 0: continue captions = item["youtube_subtitle_text"] captions = select_subtitles_between_timestamps( subtitle_dict=load_json( captions.replace( "/data/", tali_dataset_dir, ) ), starting_timestamp=0, ending_timestamp=100000000, ) for video_path in video_list: temp_path = video_path.replace("/data/", tali_dataset_dir) video_path_actual: pathlib.Path = pathlib.Path(temp_path) if video_path_actual.exists(): item["youtube_content_video"] = open(video_path_actual, "rb").read() item["youtube_subtitle_text"] = captions yield item train_generator = lambda: data_generator("train", percentage=0.1) val_generator = lambda: data_generator("val") test_generator = lambda: data_generator("test") train_data = datasets.Dataset.from_generator( train_generator, num_proc=mp.cpu_count(), writer_batch_size=5000, cache_dir=tali_dataset_dir, ) val_data = datasets.Dataset.from_generator( val_generator, writer_batch_size=5000, num_proc=mp.cpu_count(), cache_dir=tali_dataset_dir, ) test_data = datasets.Dataset.from_generator( test_generator, writer_batch_size=5000, num_proc=mp.cpu_count(), cache_dir=tali_dataset_dir, ) dataset = datasets.DatasetDict( { "train": train_data, "val": val_data, "test": test_data, } ) succesful_competion = False while not succesful_competion: try: dataset.push_to_hub(repo_id="Antreas/TALI-small", max_shard_size="5GB") succesful_competion = True except Exception as e: print(e) ``` ### Logs ```shell Pushing dataset shards to the dataset hub: 33%|██████████████████████████████████████▎ | 7/21 [24:33<49:06, 210.45s/it] Error while uploading 'data/val-00007-of-00021-6b216a984af1a4c8.parquet' to the Hub. Pushing split train to the Hub. Resuming upload of the dataset shards. Pushing dataset shards to the dataset hub: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 46/46 [42:10<00:00, 55.01s/it] Pushing split val to the Hub. Resuming upload of the dataset shards. 
Creating parquet from Arrow format: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:01<00:00, 1.55ba/s] Upload 1 LFS files: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:23<00:00, 23.51s/it] Creating parquet from Arrow format: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:02<00:00, 1.39ba/s] Upload 1 LFS files: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:30<00:00, 30.19s/it] Creating parquet from Arrow format: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:02<00:00, 1.28ba/s] Upload 1 LFS files: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:24<00:00, 24.08s/it] Creating parquet from Arrow format: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:02<00:00, 1.42ba/s] Upload 1 LFS files: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:23<00:00, 23.97s/it] Creating parquet from Arrow format: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:02<00:00, 1.49ba/s] Creating parquet from Arrow format: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:02<00:00, 1.54ba/s^ Upload 1 LFS files: 0%| | 0/1 [04:42<?, ?it/s] Pushing dataset shards to the dataset hub: 52%|████████████████████████████████████████████████████████████▏ | 11/21 [17:23<15:48, 94.82s/it] That's where it got stuck ``` ### System info ```shell - huggingface_hub version: 0.15.1 - Platform: Linux-5.4.0-147-generic-x86_64-with-glibc2.35 - Python version: 3.10.11 - Running in iPython ?: No - Running in notebook ?: No - Running in Google Colab ?: No - Token path ?: /root/.cache/huggingface/token - Has saved token ?: True - Who am I ?: Antreas - Configured git credential helpers: store - FastAI: N/A - Tensorflow: N/A - Torch: 2.1.0.dev20230606+cu121 - Jinja2: 3.1.2 - Graphviz: N/A - Pydot: N/A - Pillow: 9.5.0 - hf_transfer: N/A - gradio: N/A - numpy: 1.24.3 - ENDPOINT: https://huggingface.co - HUGGINGFACE_HUB_CACHE: /root/.cache/huggingface/hub - HUGGINGFACE_ASSETS_CACHE: /root/.cache/huggingface/assets - HF_TOKEN_PATH: /root/.cache/huggingface/token - HF_HUB_OFFLINE: False - HF_HUB_DISABLE_TELEMETRY: False - HF_HUB_DISABLE_PROGRESS_BARS: None - HF_HUB_DISABLE_SYMLINKS_WARNING: False - HF_HUB_DISABLE_EXPERIMENTAL_WARNING: False - HF_HUB_DISABLE_IMPLICIT_TOKEN: False - HF_HUB_ENABLE_HF_TRANSFER: False ```
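An aside on the reproduction script above: the unbounded `while not succesful_competion` loop retries forever and hides the root cause. A bounded retry with backoff, sketched below, keeps the chained error visible; the `max_shard_size` value and the backoff schedule are arbitrary placeholder choices (a maintainer in the thread suggested reducing the shard size, but not a specific number).

```python
# Bounded retry with backoff around push_to_hub; shard size and backoff values are
# arbitrary placeholders, not recommendations from the thread.
import time

from datasets import DatasetDict


def push_with_retries(dataset_dict: DatasetDict, repo_id: str, max_attempts: int = 10) -> None:
    for attempt in range(1, max_attempts + 1):
        try:
            dataset_dict.push_to_hub(repo_id=repo_id, max_shard_size="500MB")
            return
        except Exception as err:
            # Print the chained cause too: it usually carries the HTTP/SSL root error.
            print(f"push attempt {attempt} failed: {err!r} (cause: {err.__cause__!r})")
            time.sleep(min(60, 2 ** attempt))
    raise RuntimeError(f"push_to_hub failed after {max_attempts} attempts")
```

Called as `push_with_retries(dataset, "user/my-dataset")` in place of the bare while loop.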
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5990/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5990/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5939
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5939/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5939/comments
https://api.github.com/repos/huggingface/datasets/issues/5939/events
https://github.com/huggingface/datasets/issues/5939
1,749,955,883
I_kwDODunzps5oTjUr
5,939
.
{ "login": "flckv", "id": 103381497, "node_id": "U_kgDOBil5-Q", "avatar_url": "https://avatars.githubusercontent.com/u/103381497?v=4", "gravatar_id": "", "url": "https://api.github.com/users/flckv", "html_url": "https://github.com/flckv", "followers_url": "https://api.github.com/users/flckv/followers", "following_url": "https://api.github.com/users/flckv/following{/other_user}", "gists_url": "https://api.github.com/users/flckv/gists{/gist_id}", "starred_url": "https://api.github.com/users/flckv/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/flckv/subscriptions", "organizations_url": "https://api.github.com/users/flckv/orgs", "repos_url": "https://api.github.com/users/flckv/repos", "events_url": "https://api.github.com/users/flckv/events{/privacy}", "received_events_url": "https://api.github.com/users/flckv/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2023-06-09T14:01:34
2023-06-12T12:19:34
2023-06-12T12:19:19
NONE
null
null
null
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5939/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5939/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5936
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5936/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5936/comments
https://api.github.com/repos/huggingface/datasets/issues/5936/events
https://github.com/huggingface/datasets/issues/5936
1,748,424,388
I_kwDODunzps5oNtbE
5,936
Sequence of array not supported for most dtype
{ "login": "qgallouedec", "id": 45557362, "node_id": "MDQ6VXNlcjQ1NTU3MzYy", "avatar_url": "https://avatars.githubusercontent.com/u/45557362?v=4", "gravatar_id": "", "url": "https://api.github.com/users/qgallouedec", "html_url": "https://github.com/qgallouedec", "followers_url": "https://api.github.com/users/qgallouedec/followers", "following_url": "https://api.github.com/users/qgallouedec/following{/other_user}", "gists_url": "https://api.github.com/users/qgallouedec/gists{/gist_id}", "starred_url": "https://api.github.com/users/qgallouedec/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/qgallouedec/subscriptions", "organizations_url": "https://api.github.com/users/qgallouedec/orgs", "repos_url": "https://api.github.com/users/qgallouedec/repos", "events_url": "https://api.github.com/users/qgallouedec/events{/privacy}", "received_events_url": "https://api.github.com/users/qgallouedec/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Related, `float16` is the only dtype not supported by `Array2D` (probably by every `ArrayND`):\r\n\r\n```python\r\nfrom datasets import Array2D, Features, Dataset\r\n\r\nimport numpy as np\r\n\r\nfor dtype in [\r\n \"bool\", # ok\r\n \"int8\", # ok\r\n \"int16\", # ok\r\n \"int32\", # ok\r\n \"int64\", # ok\r\n \"uint8\", # ok\r\n \"uint16\", # ok\r\n \"uint32\", # ok\r\n \"uint64\", # ok\r\n \"float16\", # failed\r\n \"float32\", # ok\r\n \"float64\", # ok\r\n]:\r\n features = Features({\"foo\": Array2D(dtype=dtype, shape=(3, 4))})\r\n array = np.zeros((3, 4), dtype=dtype)\r\n try:\r\n dataset = Dataset.from_dict({\"foo\": [array]}, features=features)\r\n except Exception as e:\r\n print(f\"Failed for dtype={dtype}\")\r\n```", "Here's something I can't explain:\r\n\r\nWhen an array is encoded in the `from_dict` method, the numpy array is converted to a list (thus losing the original dtype, which is transfromed to the nearest builtin Python type)\r\n\r\nhttps://github.com/huggingface/datasets/blob/6ee61e6e695b1df9f232d47faf3a5e2b30b33737/src/datasets/features/features.py#L524-L525\r\n\r\nHowever, later on, this same data is written to memory, and it seems authorized that the data is an array (or in this case, a list of arrays). \r\n\r\nhttps://github.com/huggingface/datasets/blob/6ee61e6e695b1df9f232d47faf3a5e2b30b33737/src/datasets/arrow_writer.py#L185-L186\r\n\r\nSo the question is: why convert it to a Python list? This seems to be quite expensive both in terms of write time (all data is copied) and memory (e.g., an int8 is converted to an int64).\r\n\r\nFinally, if I try to remove this step, it solves all the previous problems, and it seems to me that it doesn't break anything (the CI passes without problem).", "Arrow only support 1d numpy arrays, so we convert multidim arrays to lists of 1s arrays (and keep the dtype).\r\n\r\nThough you noticed that it's concerting to lists and lose the dtype. If it's the case then it's a bug.", "Ok the conversion to list shouldn't be there indeed ! Could you open a PR to remove it ?" ]
2023-06-08T18:18:07
2023-06-14T15:03:34
2023-06-14T15:03:34
MEMBER
null
null
null
### Describe the bug Create a dataset composed of sequence of array fails for most dtypes (see code below). ### Steps to reproduce the bug ```python from datasets import Sequence, Array2D, Features, Dataset import numpy as np for dtype in [ "bool", # ok "int8", # failed "int16", # failed "int32", # failed "int64", # ok "uint8", # failed "uint16", # failed "uint32", # failed "uint64", # failed "float16", # failed "float32", # failed "float64", # ok ]: features = Features({"foo": Sequence(Array2D(dtype=dtype, shape=(2, 2)))}) sequence = [ [[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]], ] array = np.array(sequence, dtype=dtype) try: dataset = Dataset.from_dict({"foo": [array]}, features=features) except Exception as e: print(f"Failed for dtype={dtype}") ``` Traceback for `dtype="int8"`: ``` Traceback (most recent call last): File "/home/qgallouedec/datasets/a.py", line 29, in <module> raise e File "/home/qgallouedec/datasets/a.py", line 26, in <module> dataset = Dataset.from_dict({"foo": [array]}, features=features) File "/home/qgallouedec/env/lib/python3.10/site-packages/datasets/arrow_dataset.py", line 899, in from_dict pa_table = InMemoryTable.from_pydict(mapping=mapping) File "/home/qgallouedec/env/lib/python3.10/site-packages/datasets/table.py", line 799, in from_pydict return cls(pa.Table.from_pydict(*args, **kwargs)) File "pyarrow/table.pxi", line 3725, in pyarrow.lib.Table.from_pydict File "pyarrow/table.pxi", line 5254, in pyarrow.lib._from_pydict File "pyarrow/array.pxi", line 350, in pyarrow.lib.asarray File "pyarrow/array.pxi", line 236, in pyarrow.lib.array File "pyarrow/array.pxi", line 110, in pyarrow.lib._handle_arrow_array_protocol File "/home/qgallouedec/env/lib/python3.10/site-packages/datasets/arrow_writer.py", line 204, in __arrow_array__ out = cast_array_to_feature(out, type, allow_number_to_str=not self.trying_type) File "/home/qgallouedec/env/lib/python3.10/site-packages/datasets/table.py", line 1833, in wrapper return func(array, *args, **kwargs) File "/home/qgallouedec/env/lib/python3.10/site-packages/datasets/table.py", line 2091, in cast_array_to_feature casted_values = _c(array.values, feature.feature) File "/home/qgallouedec/env/lib/python3.10/site-packages/datasets/table.py", line 1833, in wrapper return func(array, *args, **kwargs) File "/home/qgallouedec/env/lib/python3.10/site-packages/datasets/table.py", line 2139, in cast_array_to_feature return array_cast(array, feature(), allow_number_to_str=allow_number_to_str) File "/home/qgallouedec/env/lib/python3.10/site-packages/datasets/table.py", line 1833, in wrapper return func(array, *args, **kwargs) File "/home/qgallouedec/env/lib/python3.10/site-packages/datasets/table.py", line 1967, in array_cast return pa_type.wrap_array(array) File "pyarrow/types.pxi", line 879, in pyarrow.lib.BaseExtensionType.wrap_array TypeError: Incompatible storage type for extension<arrow.py_extension_type<Array2DExtensionType>>: expected list<item: list<item: int8>>, got list<item: list<item: int64>> ``` ### Expected behavior Not to fail. ### Environment info - Python 3.10.6 - datasets: master branch - Numpy: 1.23.4
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5936/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5936/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5931
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5931/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5931/comments
https://api.github.com/repos/huggingface/datasets/issues/5931/events
https://github.com/huggingface/datasets/issues/5931
1,745,408,784
I_kwDODunzps5oCNMQ
5,931
`datasets.map` not reusing cached copy by default
{ "login": "bhavitvyamalik", "id": 19718818, "node_id": "MDQ6VXNlcjE5NzE4ODE4", "avatar_url": "https://avatars.githubusercontent.com/u/19718818?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bhavitvyamalik", "html_url": "https://github.com/bhavitvyamalik", "followers_url": "https://api.github.com/users/bhavitvyamalik/followers", "following_url": "https://api.github.com/users/bhavitvyamalik/following{/other_user}", "gists_url": "https://api.github.com/users/bhavitvyamalik/gists{/gist_id}", "starred_url": "https://api.github.com/users/bhavitvyamalik/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhavitvyamalik/subscriptions", "organizations_url": "https://api.github.com/users/bhavitvyamalik/orgs", "repos_url": "https://api.github.com/users/bhavitvyamalik/repos", "events_url": "https://api.github.com/users/bhavitvyamalik/events{/privacy}", "received_events_url": "https://api.github.com/users/bhavitvyamalik/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "This can happen when a map transform cannot be hashed deterministically (e.g., an object referenced by the transform changes its state after the first call - an issue with fast tokenizers). The solution is to provide `cache_file_name` in the `map` call to check this file for the cached result instead of relying on the default caching mechanism." ]
2023-06-07T09:03:33
2023-06-21T16:15:40
2023-06-21T16:15:40
CONTRIBUTOR
null
null
null
### Describe the bug When I load the dataset from a local directory, its cached copy is picked up after the first time. However, for the `map` operation, the operation is applied again and the cached copy is not picked up. Is there any way to pick up the cached copy instead of processing it again? The only solution I could think of was to use `save_to_disk` after my last transform and then use that in my DataLoader pipeline. Are there any other solutions for the same? One more thing, my dataset is occupying 6GB storage memory after I use `map`, is there any way I can reduce that memory usage? ### Steps to reproduce the bug ``` # make sure that dataset decodes audio with correct sampling rate dataset_sampling_rate = next(iter(self.raw_datasets.values())).features["audio"].sampling_rate if dataset_sampling_rate != self.feature_extractor.sampling_rate: self.raw_datasets = self.raw_datasets.cast_column( "audio", datasets.features.Audio(sampling_rate=self.feature_extractor.sampling_rate) ) vectorized_datasets = self.raw_datasets.map( self.prepare_dataset, remove_columns=next(iter(self.raw_datasets.values())).column_names, num_proc=self.num_workers, desc="preprocess datasets", ) # filter data that is longer than max_input_length self.vectorized_datasets = vectorized_datasets.filter( self.is_audio_in_length_range, num_proc=self.num_workers, input_columns=["input_length"], ) def prepare_dataset(self, batch): # load audio sample = batch["audio"] inputs = self.feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"]) batch["input_values"] = inputs.input_values[0] batch["input_length"] = len(batch["input_values"]) batch["labels"] = self.tokenizer(batch["target_text"]).input_ids return batch ``` ### Expected behavior `map` to use the cached copy and, if possible, an alternative technique to reduce memory usage after using `map` ### Environment info - `datasets` version: 2.12.0 - Platform: Linux-3.10.0-1160.71.1.el7.x86_64-x86_64-with-glibc2.17 - Python version: 3.8.16 - Huggingface_hub version: 0.15.1 - PyArrow version: 12.0.0 - Pandas version: 2.0.2
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5931/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5931/timeline
null
completed
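A minimal sketch of the workaround mentioned in the comment on issue 5931 above — passing an explicit `cache_file_name` to `map` so the cached result is found even when the transform cannot be hashed deterministically. The toy dataset, column name and cache path here are illustrative assumptions, not taken from the issue:

```python
import os
from datasets import Dataset

os.makedirs("./cache", exist_ok=True)
ds = Dataset.from_dict({"text": ["hello", "world"]})

def add_length(batch):
    # stand-in for the audio preprocessing described in the issue
    batch["length"] = len(batch["text"])
    return batch

# With an explicit cache file, `map` reuses this file on later runs instead of
# relying on hashing the transform, which can change between runs.
ds = ds.map(add_length, cache_file_name="./cache/add_length_train.arrow")
```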
https://api.github.com/repos/huggingface/datasets/issues/5930
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5930/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5930/comments
https://api.github.com/repos/huggingface/datasets/issues/5930/events
https://github.com/huggingface/datasets/issues/5930
1,745,184,395
I_kwDODunzps5oBWaL
5,930
loading private custom dataset script - authentication error
{ "login": "flckv", "id": 103381497, "node_id": "U_kgDOBil5-Q", "avatar_url": "https://avatars.githubusercontent.com/u/103381497?v=4", "gravatar_id": "", "url": "https://api.github.com/users/flckv", "html_url": "https://github.com/flckv", "followers_url": "https://api.github.com/users/flckv/followers", "following_url": "https://api.github.com/users/flckv/following{/other_user}", "gists_url": "https://api.github.com/users/flckv/gists{/gist_id}", "starred_url": "https://api.github.com/users/flckv/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/flckv/subscriptions", "organizations_url": "https://api.github.com/users/flckv/orgs", "repos_url": "https://api.github.com/users/flckv/repos", "events_url": "https://api.github.com/users/flckv/events{/privacy}", "received_events_url": "https://api.github.com/users/flckv/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "This issue seems to have been resolved, so I'm closing it." ]
2023-06-07T06:58:23
2023-06-15T14:49:21
2023-06-15T14:49:20
NONE
null
null
null
### Describe the bug Train model with my custom dataset stored in HuggingFace and loaded with the loading script requires authentication but I am not sure how ? I am logged in in the terminal, in the browser. I receive this error: /python3.8/site-packages/datasets/utils/file_utils.py", line 566, in get_from_cache raise ConnectionError(f"Couldn't reach {url} ({repr(head_error)})") ConnectionError: Couldn't reach https://huggingface.co/datasets/fkov/s/blob/main/data/s/train/labels `(ConnectionError('Unauthorized for URL `https://huggingface.co/datasets/fkov/s/blob/main/data/s/train/labels. Please use the parameter `**`use_auth_token=True`**` after logging in with `**`huggingface-cli login`**`')) when I added: `use_auth_token=True` and logged in via terminal then I received error: or the same error in different format: raise ConnectionError(f"`Couldn't reach {url} (error {response.status_code}`)") ConnectionError: Couldn't reach https://huggingface.co/datasets/fkov/s/blob/main/data/s/train/labels (`error 401`) ### Steps to reproduce the bug 1. cloned transformers library locally: https://huggingface.co/docs/transformers/v4.15.0/examples : > git clone https://github.com/huggingface/transformers > cd transformers > pip install . > cd /transformers/examples/pytorch/audio-classification > pip install -r requirements.txt 2. created **loading script** > https://huggingface.co/docs/datasets/dataset_script added next to dataset: 3. uploaded **private custom dataset** with loading script to HuggingFace > https://huggingface.co/docs/datasets/dataset_script 4. added dataset loading script to **local directory** in the above cloned transformers library: > cd /transformers/examples/pytorch/audio-classification 5. logged in to HuggingFace on local terminal with : > **huggingface-cli login** 6. run the model with the custom dataset stored on HuggingFace with code: https://github.com/huggingface/transformers/blob/main/examples/pytorch/audio-classification/README.md cd /transformers/examples/pytorch/audio-classification > python run_audio_classification.py \ > --model_name_or_path facebook/wav2vec2-base \ > --output_dir l/users/flck/outputs/wav2vec2-base-s \ > --overwrite_output_dir \ > --dataset_name s \ > --dataset_config_name s \ > --remove_unused_columns False \ > --do_train \ > --do_eval \ > --fp16 \ > --learning_rate 3e-5 \ > --max_length_seconds 1 \ > --attention_mask False \ > --warmup_ratio 0.1 \ > --num_train_epochs 5 \ > --per_device_train_batch_size 32 \ > --gradient_accumulation_steps 4 \ > --per_device_eval_batch_size 32 \ > --dataloader_num_workers 4 \ > --logging_strategy steps \ > --logging_steps 10 \ > --evaluation_strategy epoch \ > --save_strategy epoch \ > --load_best_model_at_end True \ > --metric_for_best_model accuracy \ > --save_total_limit 3 \ > --seed 0 \ > --push_to_hub \ > **--use_auth_token=True** ### Expected behavior Be able to train a model the https://github.com/huggingface/transformers/blob/main/examples/pytorch/audio-classification/ run_audio_classification.py with private custom dataset stored on HuggingFace. 
### Environment info - datasets version: 2.12.0 - `transformers` version: 4.30.0.dev0 - Platform: Linux-5.4.204-ql-generic-12.0-19-x86_64-with-glibc2.17 - Python version: 3.8.12 - Huggingface_hub version: 0.15.1 - Safetensors version: 0.3.1 - PyTorch version (GPU?): 2.0.1+cu117 (True) Versions of relevant libraries: [pip3] numpy==1.24.3 [pip3] torch==2.0.1 [pip3] torchaudio==2.0.2 [conda] numpy 1.24.3 pypi_0 pypi [conda] torch 2.0.1 pypi_0 pypi [conda] torchaudio 2.0.2 pypi_0 pypi
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5930/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5930/timeline
null
completed
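For the authentication failure in issue 5930 above, the usual pattern with `datasets` 2.x is to log in once with `huggingface-cli login` and forward the stored token when loading; a minimal sketch, where the repository id is a placeholder rather than the reporter's actual private dataset (newer releases rename the argument to `token`):

```python
from datasets import load_dataset

# use_auth_token=True forwards the token saved by `huggingface-cli login`
# to the Hub, which is required for private dataset repositories.
ds = load_dataset("my-org/my-private-dataset", use_auth_token=True)
```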
https://api.github.com/repos/huggingface/datasets/issues/5929
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5929/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5929/comments
https://api.github.com/repos/huggingface/datasets/issues/5929/events
https://github.com/huggingface/datasets/issues/5929
1,744,478,456
I_kwDODunzps5n-qD4
5,929
Importing PyTorch reduces multiprocessing performance for map
{ "login": "Maxscha", "id": 12814709, "node_id": "MDQ6VXNlcjEyODE0NzA5", "avatar_url": "https://avatars.githubusercontent.com/u/12814709?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Maxscha", "html_url": "https://github.com/Maxscha", "followers_url": "https://api.github.com/users/Maxscha/followers", "following_url": "https://api.github.com/users/Maxscha/following{/other_user}", "gists_url": "https://api.github.com/users/Maxscha/gists{/gist_id}", "starred_url": "https://api.github.com/users/Maxscha/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Maxscha/subscriptions", "organizations_url": "https://api.github.com/users/Maxscha/orgs", "repos_url": "https://api.github.com/users/Maxscha/repos", "events_url": "https://api.github.com/users/Maxscha/events{/privacy}", "received_events_url": "https://api.github.com/users/Maxscha/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi! The times match when I run this code locally or on Colab.\r\n\r\nAlso, we use `multiprocess`, not `multiprocessing`, for parallelization, and torch's `__init__.py` (executed on `import torch` ) slightly modifies the latter.", "Hey Mariosasko,\r\n\r\nThanks for looking into it. We further did some investigations after your comment and figured out it's only affecting some hardware/software configurations with the `pytorch` installation of `conda-forge`. Based on this we found the following issue in PyTorch: https://github.com/pytorch/pytorch/issues/102269 with a quick fix for now.\r\n\r\nSince it seems to be a deeper issue with forking processes, the difference between`multiprocess` and `multiprocessing` didn't make a difference.\r\n\r\nClosing this, since the issue comes from `pytorch` not `dataset`. \r\n" ]
2023-06-06T19:42:25
2023-06-16T13:09:12
2023-06-16T13:09:12
NONE
null
null
null
### Describe the bug I noticed that the performance of my dataset preprocessing with `map(...,num_proc=32)` decreases when PyTorch is imported. ### Steps to reproduce the bug I created two example scripts to reproduce this behavior: ``` import datasets datasets.disable_caching() from datasets import Dataset import time PROC=32 if __name__ == "__main__": dataset = [True] * 10000000 dataset = Dataset.from_dict({'train': dataset}) start = time.time() dataset.map(lambda x: x, num_proc=PROC) end = time.time() print(end - start) ``` Takes around 4 seconds on my machine. While the same code, but with an `import torch`: ``` import datasets datasets.disable_caching() from datasets import Dataset import time import torch PROC=32 if __name__ == "__main__": dataset = [True] * 10000000 dataset = Dataset.from_dict({'train': dataset}) start = time.time() dataset.map(lambda x: x, num_proc=PROC) end = time.time() print(end - start) ``` takes around 22 seconds. ### Expected behavior I would expect that the import of torch to not have such a significant effect on the performance of map using multiprocessing. ### Environment info - `datasets` version: 2.12.0 - Platform: Linux-5.15.0-56-generic-x86_64-with-glibc2.35 - Python version: 3.11.3 - Huggingface_hub version: 0.15.1 - PyArrow version: 12.0.0 - Pandas version: 2.0.2 - torch: 2.0.1
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5929/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5929/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5927
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5927/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5927/comments
https://api.github.com/repos/huggingface/datasets/issues/5927/events
https://github.com/huggingface/datasets/issues/5927
1,744,009,032
I_kwDODunzps5n83dI
5,927
`IndexError` when indexing `Sequence` of `Array2D` with `None` values
{ "login": "qgallouedec", "id": 45557362, "node_id": "MDQ6VXNlcjQ1NTU3MzYy", "avatar_url": "https://avatars.githubusercontent.com/u/45557362?v=4", "gravatar_id": "", "url": "https://api.github.com/users/qgallouedec", "html_url": "https://github.com/qgallouedec", "followers_url": "https://api.github.com/users/qgallouedec/followers", "following_url": "https://api.github.com/users/qgallouedec/following{/other_user}", "gists_url": "https://api.github.com/users/qgallouedec/gists{/gist_id}", "starred_url": "https://api.github.com/users/qgallouedec/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/qgallouedec/subscriptions", "organizations_url": "https://api.github.com/users/qgallouedec/orgs", "repos_url": "https://api.github.com/users/qgallouedec/repos", "events_url": "https://api.github.com/users/qgallouedec/events{/privacy}", "received_events_url": "https://api.github.com/users/qgallouedec/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Easy fix would be to add:\r\n\r\n```python\r\nnull_indices -= np.arange(len(null_indices))\r\n```\r\n\r\nbefore L279, but I'm not sure it's the most intuitive way to fix it.", "Same issue here:\r\n\r\nhttps://github.com/huggingface/datasets/blob/7fcbe5b1575c8d162b65b9397b3dfda995a4e048/src/datasets/features/features.py#L1398\r\n\r\nFixed in #5948 " ]
2023-06-06T14:36:22
2023-06-13T12:39:39
2023-06-09T13:23:50
MEMBER
null
null
null
### Describe the bug Having `None` values in a `Sequence` of `ArrayND` fails. ### Steps to reproduce the bug ```python from datasets import Array2D, Dataset, Features, Sequence data = [ [ [[0]], None, None, ] ] feature = Sequence(Array2D((1, 1), dtype="int64")) dataset = Dataset.from_dict({"a": data}, features=Features({"a": feature})) dataset[0] # error raised only when indexing ``` ``` Traceback (most recent call last): File "/Users/quentingallouedec/gia/c.py", line 13, in <module> dataset[0] # error raised only when indexing File "/Users/quentingallouedec/gia/env/lib/python3.10/site-packages/datasets/arrow_dataset.py", line 2658, in __getitem__ return self._getitem(key) File "/Users/quentingallouedec/gia/env/lib/python3.10/site-packages/datasets/arrow_dataset.py", line 2643, in _getitem formatted_output = format_table( File "/Users/quentingallouedec/gia/env/lib/python3.10/site-packages/datasets/formatting/formatting.py", line 634, in format_table return formatter(pa_table, query_type=query_type) File "/Users/quentingallouedec/gia/env/lib/python3.10/site-packages/datasets/formatting/formatting.py", line 406, in __call__ return self.format_row(pa_table) File "/Users/quentingallouedec/gia/env/lib/python3.10/site-packages/datasets/formatting/formatting.py", line 441, in format_row row = self.python_arrow_extractor().extract_row(pa_table) File "/Users/quentingallouedec/gia/env/lib/python3.10/site-packages/datasets/formatting/formatting.py", line 144, in extract_row return _unnest(pa_table.to_pydict()) File "pyarrow/table.pxi", line 4146, in pyarrow.lib.Table.to_pydict File "pyarrow/table.pxi", line 1312, in pyarrow.lib.ChunkedArray.to_pylist File "pyarrow/array.pxi", line 1521, in pyarrow.lib.Array.to_pylist File "pyarrow/scalar.pxi", line 675, in pyarrow.lib.ListScalar.as_py File "/Users/quentingallouedec/gia/env/lib/python3.10/site-packages/datasets/features/features.py", line 760, in to_pylist return self.to_numpy(zero_copy_only=zero_copy_only).tolist() File "/Users/quentingallouedec/gia/env/lib/python3.10/site-packages/datasets/features/features.py", line 725, in to_numpy numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0) File "<__array_function__ internals>", line 200, in insert File "/Users/quentingallouedec/gia/env/lib/python3.10/site-packages/numpy/lib/function_base.py", line 5426, in insert old_mask[indices] = False IndexError: index 3 is out of bounds for axis 0 with size 3 ``` AFAIK, the problem only occurs when you use a `Sequence` of `ArrayND`. I strongly suspect that the problem comes from this line, or `np.insert` is misused: https://github.com/huggingface/datasets/blob/02ee418831aba68d0be93227bce8b3f42ef8980f/src/datasets/features/features.py#L729 To put t simply, you want something that do that: ```python import numpy as np numpy_arr = np.zeros((1, 1, 1)) null_indices = np.array([1, 2]) np.insert(numpy_arr, null_indices, np.nan, axis=0) # raise an error, instead of outputting # array([[[ 0.]], # [[nan]], # [[nan]]]) ``` ### Expected behavior The previous code should not raise an error. ### Environment info - Python 3.10.11 - datasets 2.10.0 - pyarrow 12.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5927/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5927/timeline
null
completed
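The one-line fix proposed in the first comment on issue 5927 above — shifting `null_indices` before calling `np.insert` — can be checked in isolation with plain NumPy; this is a standalone sketch of the indexing behaviour, not the library's actual code path:

```python
import numpy as np

numpy_arr = np.zeros((1, 1, 1))
null_indices = np.array([1, 2])  # final positions where the None rows should appear

# np.insert interprets each index relative to the *original* array, so index 2
# is out of bounds for a length-1 array. Subtracting the rank of each index
# turns "final positions" into valid insertion points.
shifted = null_indices - np.arange(len(null_indices))
out = np.insert(numpy_arr.astype(np.float64), shifted, np.nan, axis=0)
print(out.shape)  # (3, 1, 1): one real row followed by two NaN rows
```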
https://api.github.com/repos/huggingface/datasets/issues/5926
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5926/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5926/comments
https://api.github.com/repos/huggingface/datasets/issues/5926/events
https://github.com/huggingface/datasets/issues/5926
1,743,922,028
I_kwDODunzps5n8iNs
5,926
Uncaught exception when generating the splits from a dataset that miss data
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[]
open
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for reporting, @severo.\r\n\r\nThis is a known issue with `fsspec`:\r\n- #5862\r\n- https://github.com/fsspec/filesystem_spec/issues/1265" ]
2023-06-06T13:51:01
2023-06-07T07:53:16
null
CONTRIBUTOR
null
null
null
### Describe the bug Dataset https://huggingface.co/datasets/blog_authorship_corpus has an issue with its hosting platform, since https://drive.google.com/u/0/uc?id=1cGy4RNDV87ZHEXbiozABr9gsSrZpPaPz&export=download returns 404 error. But when trying to generate the split names, we get an exception which is now correctly caught. Seen originally in https://github.com/huggingface/datasets-server/blob/adbdcd6710ffed4e2eb2e4cd905b5e0dff530a15/services/worker/src/worker/job_runners/config/parquet_and_info.py#L435 ### Steps to reproduce the bug ```python >>> from datasets import StreamingDownloadManager, load_dataset_builder >>> builder = load_dataset_builder(path="blog_authorship_corpus") Downloading builder script: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5.60k/5.60k [00:00<00:00, 23.1MB/s] Downloading metadata: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2.81k/2.81k [00:00<00:00, 14.7MB/s] Downloading readme: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 7.30k/7.30k [00:00<00:00, 30.8MB/s] >>> dl_manager = StreamingDownloadManager(base_path=builder.base_path) >>> builder._split_generators(dl_manager) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/slesage/.cache/huggingface/modules/datasets_modules/datasets/blog_authorship_corpus/6f5d78241afd8313111956f877a57db7a0e9fc6718255dc85df0928197feb683/blog_authorship_corpus.py", line 79, in _split_generators data = dl_manager.download_and_extract(_DATA_URL) File "/home/slesage/hf/datasets-server/services/worker/.venv/lib/python3.9/site-packages/datasets/download/streaming_download_manager.py", line 1087, in download_and_extract return self.extract(self.download(url_or_urls)) File "/home/slesage/hf/datasets-server/services/worker/.venv/lib/python3.9/site-packages/datasets/download/streaming_download_manager.py", line 1039, in extract urlpaths = map_nested(self._extract, url_or_urls, map_tuple=True) File "/home/slesage/hf/datasets-server/services/worker/.venv/lib/python3.9/site-packages/datasets/utils/py_utils.py", line 435, in map_nested return function(data_struct) File "/home/slesage/hf/datasets-server/services/worker/.venv/lib/python3.9/site-packages/datasets/download/streaming_download_manager.py", line 1044, in _extract protocol = _get_extraction_protocol(urlpath, use_auth_token=self.download_config.use_auth_token) File "/home/slesage/hf/datasets-server/services/worker/.venv/lib/python3.9/site-packages/datasets/download/streaming_download_manager.py", line 433, in _get_extraction_protocol with fsspec.open(urlpath, **kwargs) as f: File "/home/slesage/hf/datasets-server/services/worker/.venv/lib/python3.9/site-packages/fsspec/core.py", line 439, in open return open_files( File "/home/slesage/hf/datasets-server/services/worker/.venv/lib/python3.9/site-packages/fsspec/core.py", line 194, in __getitem__ out = super().__getitem__(item) IndexError: list index out of range ``` ### Expected behavior We should have an Exception raised by the datasets library. 
### Environment info - `datasets` version: 2.12.0 - Platform: Linux-5.19.0-1026-aws-x86_64-with-glibc2.35 - Python version: 3.9.15 - Huggingface_hub version: 0.15.1 - PyArrow version: 11.0.0 - Pandas version: 2.0.2
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5926/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5926/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5925
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5925/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5925/comments
https://api.github.com/repos/huggingface/datasets/issues/5925/events
https://github.com/huggingface/datasets/issues/5925
1,741,941,436
I_kwDODunzps5n0-q8
5,925
Breaking API change in datasets.list_datasets caused by change in HfApi.list_datasets
{ "login": "mtkinit", "id": 78868366, "node_id": "MDQ6VXNlcjc4ODY4MzY2", "avatar_url": "https://avatars.githubusercontent.com/u/78868366?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mtkinit", "html_url": "https://github.com/mtkinit", "followers_url": "https://api.github.com/users/mtkinit/followers", "following_url": "https://api.github.com/users/mtkinit/following{/other_user}", "gists_url": "https://api.github.com/users/mtkinit/gists{/gist_id}", "starred_url": "https://api.github.com/users/mtkinit/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mtkinit/subscriptions", "organizations_url": "https://api.github.com/users/mtkinit/orgs", "repos_url": "https://api.github.com/users/mtkinit/repos", "events_url": "https://api.github.com/users/mtkinit/events{/privacy}", "received_events_url": "https://api.github.com/users/mtkinit/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2023-06-05T14:46:04
2023-06-19T17:22:43
2023-06-19T17:22:43
NONE
null
null
null
### Describe the bug Hi all, after an update of the `datasets` library, we observed crashes in our code. We relied on `datasets.list_datasets` returning a `list`. Now, after the API of the HfApi.list_datasets was changed and it returns a `list` instead of an `Iterable`, `datasets.list_datasets` now sometimes returns a `list` and sometimes an `Iterable`. It would be helpful to indicate that in the return type of the `datasets.list_datasets` function. Thanks, Martin ### Steps to reproduce the bug Here, the code crashed after we updated the `datasets` library: ```python # list_datasets no longer returns a list, which leads to an error when one tries to slice it for datasets.list_datasets(with_details=True)[:limit]: ... ``` ### Expected behavior It would be helpful to indicate that in the return type of the `datasets.list_datasets` function. ### Environment info Ubuntu 22.04 datasets 2.12.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5925/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5925/timeline
null
completed
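Code that slices the result of `datasets.list_datasets` can be made robust to either return type by materializing only what it needs; a small defensive sketch, where `limit` is an illustrative variable:

```python
import itertools
import datasets

limit = 10
# islice works whether list_datasets returns a list or a lazy iterable,
# so the code no longer depends on the exact return type.
for info in itertools.islice(datasets.list_datasets(with_details=True), limit):
    print(info)
```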
https://api.github.com/repos/huggingface/datasets/issues/5923
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5923/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5923/comments
https://api.github.com/repos/huggingface/datasets/issues/5923/events
https://github.com/huggingface/datasets/issues/5923
1,737,436,227
I_kwDODunzps5njyxD
5,923
Cannot import datasets - ValueError: pyarrow.lib.IpcWriteOptions size changed, may indicate binary incompatibility
{ "login": "ehuangc", "id": 71412682, "node_id": "MDQ6VXNlcjcxNDEyNjgy", "avatar_url": "https://avatars.githubusercontent.com/u/71412682?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ehuangc", "html_url": "https://github.com/ehuangc", "followers_url": "https://api.github.com/users/ehuangc/followers", "following_url": "https://api.github.com/users/ehuangc/following{/other_user}", "gists_url": "https://api.github.com/users/ehuangc/gists{/gist_id}", "starred_url": "https://api.github.com/users/ehuangc/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ehuangc/subscriptions", "organizations_url": "https://api.github.com/users/ehuangc/orgs", "repos_url": "https://api.github.com/users/ehuangc/repos", "events_url": "https://api.github.com/users/ehuangc/events{/privacy}", "received_events_url": "https://api.github.com/users/ehuangc/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Based on https://github.com/rapidsai/cudf/issues/10187, this probably means your `pyarrow` installation is not compatible with `datasets`.\r\n\r\nCan you please execute the following commands in the terminal and paste the output here?\r\n```\r\nconda list | grep arrow\r\n``` \r\n```\r\npython -c \"import pyarrow; print(pyarrow.__file__)\"\r\n```\r\n\r\n\r\n", "> Based on [rapidsai/cudf#10187](https://github.com/rapidsai/cudf/issues/10187), this probably means your `pyarrow` installation is not compatible with `datasets`.\r\n> \r\n> Can you please execute the following commands in the terminal and paste the output here?\r\n> \r\n> ```\r\n> conda list | grep arrow\r\n> ```\r\n> \r\n> ```\r\n> python -c \"import pyarrow; print(pyarrow.__file__)\"\r\n> ```\r\n\r\n\r\nHere is the output to the first command:\r\n```\r\narrow-cpp 11.0.0 py39h7f74497_0 \r\npyarrow 12.0.0 pypi_0 pypi\r\n```\r\nand the second:\r\n```\r\n/Users/edward/opt/anaconda3/envs/cs235/lib/python3.9/site-packages/pyarrow/__init__.py\r\n```\r\nThanks!\r\n\r\n\r\n\r\n", "after installing pytesseract 0.3.10, I got the above error. FYI ", "RuntimeError: Failed to import transformers.trainer because of the following error (look up to see its traceback):\r\npyarrow.lib.IpcWriteOptions size changed, may indicate binary incompatibility. Expected 88 from C header, got 72 from PyObject", "I got the same error, pyarrow 12.0.0 released May/2023 (https://pypi.org/project/pyarrow/) is not compatible, running `pip install pyarrow==11.0.0` to force install the previous version solved the problem.\r\n\r\nDo we need to update dependencies? ", "Please note that our CI properly passes all tests with `pyarrow-12.0.0`, for Python 3.7 and Python 3.10, for Ubuntu and Windows: see for example https://github.com/huggingface/datasets/actions/runs/5157324334/jobs/9289582291", "For conda with python3.8.16 this solved my problem! thanks!\r\n\r\n> I got the same error, pyarrow 12.0.0 released May/2023 (https://pypi.org/project/pyarrow/) is not compatible, running `pip install pyarrow==11.0.0` to force install the previous version solved the problem.\r\n> \r\n> Do we need to update dependencies? I can work on that if no one else is working on it.\r\n\r\n", "Thanks for replying. I am not sure about those environments but it seems like pyarrow-12.0.0 does not work for conda with python 3.8.16. 
\r\n\r\n> Please note that our CI properly passes all tests with `pyarrow-12.0.0`, for Python 3.7 and Python 3.10, for Ubuntu and Windows: see for example https://github.com/huggingface/datasets/actions/runs/5157324334/jobs/9289582291\r\n\r\n", "Got the same error with:\r\n\r\n```\r\narrow-cpp 11.0.0 py310h7516544_0 \r\npyarrow 12.0.0 pypi_0 pypi\r\n\r\npython 3.10.11 h7a1cb2a_2 \r\n\r\ndatasets 2.13.0 pyhd8ed1ab_0 conda-forge\r\n```", "> I got the same error, pyarrow 12.0.0 released May/2023 (https://pypi.org/project/pyarrow/) is not compatible, running `pip install pyarrow==11.0.0` to force install the previous version solved the problem.\r\n> \r\n> Do we need to update dependencies?\r\n\r\nThis solved the issue for me as well.", "> I got the same error, pyarrow 12.0.0 released May/2023 (https://pypi.org/project/pyarrow/) is not compatible, running `pip install pyarrow==11.0.0` to force install the previous version solved the problem.\r\n> \r\n> Do we need to update dependencies?\r\n\r\nSolved it for me also", "> 基于 [rapidsai/cudf#10187](https://github.com/rapidsai/cudf/issues/10187),这可能意味着您的安装与 不兼容。`pyarrow``datasets`\r\n> \r\n> 您能否在终端中执行以下命令并将输出粘贴到此处?\r\n> \r\n> ```\r\n> conda list | grep arrow\r\n> ```\r\n> \r\n> ```\r\n> python -c \"import pyarrow; print(pyarrow.__file__)\"\r\n> ```\r\n\r\narrow-cpp 11.0.0 py310h7516544_0 \r\npyarrow 12.0.1 pypi_0 pypi\r\n\r\n/root/miniconda3/lib/python3.10/site-packages/pyarrow/__init__.py", "Got the same problem with\r\n\r\narrow-cpp 11.0.0 py310h1fc3239_0 \r\npyarrow 12.0.1 pypi_0 pypi\r\n\r\nminiforge3/envs/mlp/lib/python3.10/site-packages/pyarrow/__init__.py\r\n\r\nReverting back to pyarrow 11 solved the problem.\r\n", "Solved with `pip install pyarrow==11.0.0`", "I got different. Solved with\r\npip install pyarrow==12.0.1\r\npip install cchardet\r\n\r\nenv:\r\nPython 3.9.16\r\ntransformers 4.32.1", "> I got the same error, pyarrow 12.0.0 released May/2023 (https://pypi.org/project/pyarrow/) is not compatible, running `pip install pyarrow==11.0.0` to force install the previous version solved the problem.\r\n> \r\n> Do we need to update dependencies?\r\n\r\nThis works for me as well", "> I got different. Solved with pip install pyarrow==12.0.1 pip install cchardet\r\n> \r\n> env: Python 3.9.16 transformers 4.32.1\r\n\r\nI guess it also depends on the Python version. I got Python 3.11.5 and pyarrow==12.0.0. \r\nIt works! ", "Hi, if this helps anyone, pip install pyarrow==11.0.0 did not work for me (I'm using Colab) but this worked: \r\n!pip install --extra-index-url=https://pypi.nvidia.com cudf-cu11", "> Hi, if this helps anyone, pip install pyarrow==11.0.0 did not work for me (I'm using Colab) but this worked: !pip install --extra-index-url=https://pypi.nvidia.com cudf-cu11\r\n\r\nthanks! I met the same problem and your suggestion solved it.", "(I was doing quiet install so I didn't notice it initially)\r\nI've been loading the same dataset for months on Colab, just now I got this error as well. I think Colab has changed their image recently (I had some errors regarding CUDA previously as well). beware of this and restart runtime if you're doing quite pip installs.\r\nmoreover installing stable version of datasets on pypi gives this:\r\n\r\n```\r\nERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\r\nibis-framework 7.1.0 requires pyarrow<15,>=2, but you have pyarrow 15.0.0 which is incompatible.\r\nSuccessfully installed datasets-2.17.0 dill-0.3.8 multiprocess-0.70.16 pyarrow-15.0.0\r\nWARNING: The following packages were previously imported in this runtime:\r\n [pyarrow]\r\nYou must restart the runtime in order to use newly installed versions.\r\n``` \r\n", "for colab - pip install pyarrow==11.0.0", "The above methods didn't help me. So I installed an older version: `!pip install datasets==2.16.1`\r\nand `import datasets` worked!!", "@rasith1998 @PennlaineChu You can avoid this issue by restarting the session after the `datasets` installation (see https://github.com/huggingface/datasets/issues/6661 for more info)\r\n\r\nAlso, we've contacted Google Colab folks to update the default PyArrow installation, so the issue should soon be \"officially\" resolved on their side.", "> Also, we've contacted Google Colab folks to update the default PyArrow installation, so the issue should soon be \"officially\" resolved on their side.\r\n\r\nThis has been done! Google Colab now pre-installs PyArrow 14.0.2, which makes this issue unlikely to happen, so I'm closing it." ]
2023-06-02T04:16:32
2024-02-25T16:38:03
2024-02-25T16:38:03
NONE
null
null
null
### Describe the bug When trying to import datasets, I get a pyarrow ValueError: Traceback (most recent call last): File "/Users/edward/test/test.py", line 1, in <module> import datasets File "/Users/edward/opt/anaconda3/envs/cs235/lib/python3.9/site-packages/datasets/__init__.py", line 43, in <module> from .arrow_dataset import Dataset File "/Users/edward/opt/anaconda3/envs/cs235/lib/python3.9/site-packages/datasets/arrow_dataset.py", line 65, in <module> from .arrow_reader import ArrowReader File "/Users/edward/opt/anaconda3/envs/cs235/lib/python3.9/site-packages/datasets/arrow_reader.py", line 28, in <module> import pyarrow.parquet as pq File "/Users/edward/opt/anaconda3/envs/cs235/lib/python3.9/site-packages/pyarrow/parquet/__init__.py", line 20, in <module> from .core import * File "/Users/edward/opt/anaconda3/envs/cs235/lib/python3.9/site-packages/pyarrow/parquet/core.py", line 45, in <module> from pyarrow.fs import (LocalFileSystem, FileSystem, FileType, File "/Users/edward/opt/anaconda3/envs/cs235/lib/python3.9/site-packages/pyarrow/fs.py", line 49, in <module> from pyarrow._gcsfs import GcsFileSystem # noqa File "pyarrow/_gcsfs.pyx", line 1, in init pyarrow._gcsfs ValueError: pyarrow.lib.IpcWriteOptions size changed, may indicate binary incompatibility. Expected 88 from C header, got 72 from PyObject ### Steps to reproduce the bug `import datasets` ### Expected behavior Successful import ### Environment info Conda environment, MacOS python 3.9.12 datasets 2.12.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5923/reactions", "total_count": 6, "+1": 6, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5923/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5922
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5922/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5922/comments
https://api.github.com/repos/huggingface/datasets/issues/5922/events
https://github.com/huggingface/datasets/issues/5922
1,736,898,953
I_kwDODunzps5nhvmJ
5,922
Length of table does not accurately reflect the split
{ "login": "amogkam", "id": 8068268, "node_id": "MDQ6VXNlcjgwNjgyNjg=", "avatar_url": "https://avatars.githubusercontent.com/u/8068268?v=4", "gravatar_id": "", "url": "https://api.github.com/users/amogkam", "html_url": "https://github.com/amogkam", "followers_url": "https://api.github.com/users/amogkam/followers", "following_url": "https://api.github.com/users/amogkam/following{/other_user}", "gists_url": "https://api.github.com/users/amogkam/gists{/gist_id}", "starred_url": "https://api.github.com/users/amogkam/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/amogkam/subscriptions", "organizations_url": "https://api.github.com/users/amogkam/orgs", "repos_url": "https://api.github.com/users/amogkam/repos", "events_url": "https://api.github.com/users/amogkam/events{/privacy}", "received_events_url": "https://api.github.com/users/amogkam/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892913, "node_id": "MDU6TGFiZWwxOTM1ODkyOTEz", "url": "https://api.github.com/repos/huggingface/datasets/labels/wontfix", "name": "wontfix", "color": "ffffff", "default": true, "description": "This will not be worked on" } ]
closed
false
null
[]
null
[ "As already replied by @lhoestq (private channel):\r\n> `.train_test_split` (as well as `.shard`, `.select`) doesn't create a new arrow table to save time and disk space. Instead, it uses an indices mapping on top of the table that locate which examples are part of train or test.", "This is an optimization that we don't plan to \"fix\", so I'm closing this issue." ]
2023-06-01T18:56:26
2023-06-02T16:13:31
2023-06-02T16:13:31
NONE
null
null
null
### Describe the bug I load a Huggingface Dataset and do `train_test_split`. I'm expecting the underlying table for the dataset to also be split, but it's not. ### Steps to reproduce the bug ![image](https://github.com/huggingface/datasets/assets/8068268/83e5768f-8b4c-422a-945c-832a7585afff) ### Expected behavior The expected behavior is that `len(hf_dataset["train"].data)` matches the length of the train split, rather than the length of the entire unsplit dataset. ### Environment info datasets 2.10.1 python 3.10.11
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5922/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5922/timeline
null
completed
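As the reply on issue 5922 above explains, `train_test_split` keeps the full Arrow table and only adds an indices mapping; when the underlying table itself must match the split, the mapping can be materialized with `flatten_indices()`. A small sketch:

```python
from datasets import Dataset

ds = Dataset.from_dict({"x": list(range(100))})
splits = ds.train_test_split(test_size=0.2)

print(len(splits["train"]), len(splits["train"].data))  # 80 rows, but the table still holds 100
materialized = splits["train"].flatten_indices()        # rewrites the table to just the 80 rows
print(len(materialized), len(materialized.data))        # 80 80
```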
https://api.github.com/repos/huggingface/datasets/issues/5918
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5918/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5918/comments
https://api.github.com/repos/huggingface/datasets/issues/5918/events
https://github.com/huggingface/datasets/issues/5918
1,735,313,549
I_kwDODunzps5nbsiN
5,918
File not found for audio dataset
{ "login": "RobertBaruch", "id": 1783950, "node_id": "MDQ6VXNlcjE3ODM5NTA=", "avatar_url": "https://avatars.githubusercontent.com/u/1783950?v=4", "gravatar_id": "", "url": "https://api.github.com/users/RobertBaruch", "html_url": "https://github.com/RobertBaruch", "followers_url": "https://api.github.com/users/RobertBaruch/followers", "following_url": "https://api.github.com/users/RobertBaruch/following{/other_user}", "gists_url": "https://api.github.com/users/RobertBaruch/gists{/gist_id}", "starred_url": "https://api.github.com/users/RobertBaruch/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/RobertBaruch/subscriptions", "organizations_url": "https://api.github.com/users/RobertBaruch/orgs", "repos_url": "https://api.github.com/users/RobertBaruch/repos", "events_url": "https://api.github.com/users/RobertBaruch/events{/privacy}", "received_events_url": "https://api.github.com/users/RobertBaruch/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[ "load_dataset () did not work for loading local files either " ]
2023-06-01T02:15:29
2023-06-11T06:02:25
null
NONE
null
null
null
### Describe the bug After loading an audio dataset, and looking at a sample entry, the `path` element, which is supposed to be the path to the audio file, doesn't actually exist. ### Steps to reproduce the bug Run bug.py: ```py import os.path from datasets import load_dataset def run() -> None: cv13 = load_dataset( "mozilla-foundation/common_voice_13_0", "hi", split="train", ) print(cv13[0]) audio_file = cv13[0]["path"] if not os.path.exists(audio_file): raise ValueError(f'File {audio_file} does not exist.') if __name__ == "__main__": run() ``` The result (on my machine): ```json {'client_id': '0f018a99663f33afbb7d38aee281fb1afcfd07f9e7acd00383f604e1e17c38d6ed8adf1bd2ccbf927a52c5adefb8ac4b158ce27a7c2ed9581e71202eb302dfb3', 'path': 'C:\\Users\\rober\\.cache\\huggingface\\datasets\\downloads\\extracted\\8d1479bc09b4609bc2675bd02d6869a4d5e09f7e6616f540bd55eacef46c6e2b\\common_voice_hi_26008353.mp3', 'audio': {'path': 'C:\\Users\\rober\\.cache\\huggingface\\datasets\\downloads\\extracted\\8d1479bc09b4609bc2675bd02d6869a4d5e09f7e6616f540bd55eacef46c6e2b\\common_voice_hi_26008353.mp3', 'array': array([ 6.46234854e-26, -1.35709319e-25, -8.07793567e-26, ..., 1.06425944e-07, 4.46417090e-08, 2.61451660e-09]), 'sampling_rate': 48000}, 'sentence': 'हमने उसका जन्मदिन मनाया।', 'up_votes': 2, 'down_votes': 0, 'age': '', 'gender': '', 'accent': '', 'locale': 'hi', 'segment': '' ', 'variant': ''} ``` ```txt Traceback (most recent call last): File "F:\eo-reco\bug.py", line 18, in <module> run() File "F:\eo-reco\bug.py", line 15, in run raise ValueError(f'File {audio_file} does not exist.') ValueError: File C:\Users\rober\.cache\huggingface\datasets\downloads\extracted\8d1479bc09b4609bc2675bd02d6869a4d5e09f7e6616f540bd55eacef46c6e2b\common_voice_hi_26008353.mp3 does not exist. ``` ### Expected behavior The `path` element points to the correct file, which happens to be: ``` C:\Users\rober\.cache\huggingface\datasets\downloads\extracted\8d1479bc09b4609bc2675bd02d6869a4d5e09f7e6616f540bd55eacef46c6e2b\hi_train_0\common_voice_hi_26008353.mp3 ``` That is, there's an extra directory `hi_train_0` that is not in the `path` element. ### Environment info - `datasets` version: 2.12.0 - Platform: Windows-10-10.0.22621-SP0 - Python version: 3.11.3 - Huggingface_hub version: 0.14.1 - PyArrow version: 12.0.0 - Pandas version: 2.0.1 -
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5918/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5918/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5914
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5914/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5914/comments
https://api.github.com/repos/huggingface/datasets/issues/5914/events
https://github.com/huggingface/datasets/issues/5914
1,731,483,996
I_kwDODunzps5nNFlc
5,914
array is too big; `arr.size * arr.dtype.itemsize` is larger than the maximum possible size in Datasets
{ "login": "ravenouse", "id": 85110830, "node_id": "MDQ6VXNlcjg1MTEwODMw", "avatar_url": "https://avatars.githubusercontent.com/u/85110830?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ravenouse", "html_url": "https://github.com/ravenouse", "followers_url": "https://api.github.com/users/ravenouse/followers", "following_url": "https://api.github.com/users/ravenouse/following{/other_user}", "gists_url": "https://api.github.com/users/ravenouse/gists{/gist_id}", "starred_url": "https://api.github.com/users/ravenouse/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ravenouse/subscriptions", "organizations_url": "https://api.github.com/users/ravenouse/orgs", "repos_url": "https://api.github.com/users/ravenouse/repos", "events_url": "https://api.github.com/users/ravenouse/events{/privacy}", "received_events_url": "https://api.github.com/users/ravenouse/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[]
2023-05-30T04:25:00
2023-05-30T04:25:00
null
NONE
null
null
null
### Describe the bug When using the `filter` or `map` function to preprocess a dataset, a ValueError is encountered with the error message "array is too big; arr.size * arr.dtype.itemsize is larger than the maximum possible size." Detailed error message: Traceback (most recent call last): File "data_processing.py", line 26, in <module> processed_dataset[split] = samromur_children[split].map(prepare_dataset, cache_file_name=cache_dict[split],writer_batch_size = 50) File "/projects/zhwa3087/software/anaconda/envs/mycustomenv/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 2405, in map desc=desc, File "/projects/zhwa3087/software/anaconda/envs/mycustomenv/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 557, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) File "/projects/zhwa3087/software/anaconda/envs/mycustomenv/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 524, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) File "/projects/zhwa3087/software/anaconda/envs/mycustomenv/lib/python3.7/site-packages/datasets/fingerprint.py", line 480, in wrapper out = func(self, *args, **kwargs) File "/projects/zhwa3087/software/anaconda/envs/mycustomenv/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 2756, in _map_single example = apply_function_on_filtered_inputs(example, i, offset=offset) File "/projects/zhwa3087/software/anaconda/envs/mycustomenv/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 2655, in apply_function_on_filtered_inputs processed_inputs = function(*fn_args, *additional_args, **fn_kwargs) File "/projects/zhwa3087/software/anaconda/envs/mycustomenv/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 2347, in decorated result = f(decorated_item, *args, **kwargs) File "data_processing.py", line 11, in prepare_dataset audio = batch["audio"] File "/projects/zhwa3087/software/anaconda/envs/mycustomenv/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 123, in __getitem__ value = decode_nested_example(self.features[key], value) if value is not None else None File "/projects/zhwa3087/software/anaconda/envs/mycustomenv/lib/python3.7/site-packages/datasets/features/features.py", line 1260, in decode_nested_example return schema.decode_example(obj, token_per_repo_id=token_per_repo_id) if obj is not None else None File "/projects/zhwa3087/software/anaconda/envs/mycustomenv/lib/python3.7/site-packages/datasets/features/audio.py", line 156, in decode_example array, sampling_rate = self._decode_non_mp3_path_like(path, token_per_repo_id=token_per_repo_id) File "/projects/zhwa3087/software/anaconda/envs/mycustomenv/lib/python3.7/site-packages/datasets/features/audio.py", line 257, in _decode_non_mp3_path_like array, sampling_rate = librosa.load(f, sr=self.sampling_rate, mono=self.mono) File "/projects/zhwa3087/software/anaconda/envs/mycustomenv/lib/python3.7/site-packages/librosa/core/audio.py", line 176, in load y, sr_native = __soundfile_load(path, offset, duration, dtype) File "/projects/zhwa3087/software/anaconda/envs/mycustomenv/lib/python3.7/site-packages/librosa/core/audio.py", line 222, in __soundfile_load y = sf_desc.read(frames=frame_duration, dtype=dtype, always_2d=False).T File "/projects/zhwa3087/software/anaconda/envs/mycustomenv/lib/python3.7/site-packages/soundfile.py", line 891, in read out = self._create_empty_array(frames, always_2d, dtype) File "/projects/zhwa3087/software/anaconda/envs/mycustomenv/lib/python3.7/site-packages/soundfile.py", line 
1323, in _create_empty_array return np.empty(shape, dtype, order='C') ValueError: array is too big; `arr.size * arr.dtype.itemsize` is larger than the maximum possible size. ### Steps to reproduce the bug ```python from datasets import load_dataset, DatasetDict from transformers import WhisperFeatureExtractor from transformers import WhisperTokenizer samromur_children= load_dataset("language-and-voice-lab/samromur_children") feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-small") tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-small", language="icelandic", task="transcribe") def prepare_dataset(batch): # load and resample audio data from 48 to 16kHz audio = batch["audio"] # compute log-Mel input features from input audio array batch["input_features"] = feature_extractor(audio["array"], sampling_rate=16000).input_features[0] # encode target text to label ids batch["labels"] = tokenizer(batch["normalized_text"]).input_ids return batch cache_dict = {"train": "./cache/audio_train.cache", \ "validation": "./cache/audio_validation.cache", \ "test": "./cache/audio_test.cache"} filter_cache_dict = {"train": "./cache/filter_train.arrow", \ "validation": "./cache/filter_validation.arrow", \ "test": "./cache/filter_test.arrow"} print("before filtering") print(samromur_children) #filter the dataset to only include examples with more than 2 seconds of audio samromur_children = samromur_children.filter(lambda example: example["audio"]["array"].shape[0] > 16000*2, cache_file_names=filter_cache_dict) print("after filtering") print(samromur_children) processed_dataset = DatasetDict() # processed_dataset = samromur_children.map(prepare_dataset, cache_file_names=cache_dict, num_proc=10,) for split in ["train", "validation", "test"]: processed_dataset[split] = samromur_children[split].map(prepare_dataset, cache_file_name=cache_dict[split]) ``` ### Expected behavior The dataset is successfully processed and ready to train the model. ### Environment info Python version: 3.7.13 datasets package version: 2.4.0 librosa package version: 0.10.0.post2
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5914/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5914/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5913
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5913/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5913/comments
https://api.github.com/repos/huggingface/datasets/issues/5913/events
https://github.com/huggingface/datasets/issues/5913
1,731,427,484
I_kwDODunzps5nM3yc
5,913
I tried to load a custom dataset using the following statement: dataset = load_dataset('json', data_files=data_files). The dataset contains 50 million text-image pairs, but an error occurred.
{ "login": "cjt222", "id": 17508662, "node_id": "MDQ6VXNlcjE3NTA4NjYy", "avatar_url": "https://avatars.githubusercontent.com/u/17508662?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cjt222", "html_url": "https://github.com/cjt222", "followers_url": "https://api.github.com/users/cjt222/followers", "following_url": "https://api.github.com/users/cjt222/following{/other_user}", "gists_url": "https://api.github.com/users/cjt222/gists{/gist_id}", "starred_url": "https://api.github.com/users/cjt222/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cjt222/subscriptions", "organizations_url": "https://api.github.com/users/cjt222/orgs", "repos_url": "https://api.github.com/users/cjt222/repos", "events_url": "https://api.github.com/users/cjt222/events{/privacy}", "received_events_url": "https://api.github.com/users/cjt222/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Thanks for reporting, @cjt222.\r\n\r\nWhat is the structure of your JSON files. Please note that it is normally simpler if the data file format is JSON-Lines instead. ", "> Thanks for reporting, @cjt222.\r\n> \r\n> What is the structure of your JSON files. Please note that it is normally simpler if the data file format is JSON-Lines instead.\r\n\r\nThanks! I have encountered similar problems. I modify the json format from list to line and works!" ]
2023-05-30T02:55:26
2023-07-24T12:00:38
2023-07-24T12:00:38
NONE
null
null
null
### Describe the bug File "/home/kas/.conda/envs/diffusers/lib/python3.7/site-packages/datasets/builder.py", line 1858, in _prepare_split_single Downloading and preparing dataset json/default to /home/kas/diffusers/examples/dreambooth/cache_data/datasets/json/default-acf423d8c6ef99d0/0.0.0/e347ab1c932092252e717ff3f949105a4dd28b27e842dd53157d2f72e276c2e4... Downloading data files: 0%| | 0/1 [00:00<?, ?it/s] Downloading data files: 100%|██████████| 1/1 [00:00<00:00, 84.35it/s] Extracting data files: 0%| | 0/1 [00:00<?, ?it/s] for _, table in generator: File "/home/kas/.conda/envs/diffusers/lib/python3.7/site-packages/datasets/packaged_modules/json/json.py", line 114, in _generate_tables io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size) File "pyarrow/_json.pyx", line 258, in pyarrow._json.read_json Extracting data files: 100%|██████████| 1/1 [00:00<00:00, 27.72it/s] Generating train split: 0 examples [00:00, ? examples/s] File "pyarrow/error.pxi", line 144, in pyarrow.lib.pyarrow_internal_check_status File "pyarrow/error.pxi", line 125, in pyarrow.lib.check_status pyarrow.lib.ArrowCapacityError: array cannot contain more than 2147483646 bytes, have 2390448764 ### Steps to reproduce the bug 1、data_files = ["1.json", "2.json", "3.json"] 2、dataset = load_dataset('json', data_files=data_files) ### Expected behavior Read the dataset normally. ### Environment info - `datasets` version: 2.12.0 - Platform: Linux-4.15.0-29-generic-x86_64-with-debian-buster-sid - Python version: 3.7.16 - Huggingface_hub version: 0.14.1 - PyArrow version: 12.0.0 - Pandas version: 1.3.5
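As the comments above suggest, converting each file from a single top-level JSON array to JSON Lines lets pyarrow read it in small blocks instead of one oversized array. A rough sketch of that conversion, using the file names from the report; for tens of millions of records you would want to stream the conversion rather than `json.load` the whole file into memory.

```python
import json

from datasets import load_dataset

# Convert a top-level JSON array into JSON Lines (one record per line)
with open("1.json", encoding="utf-8") as src, open("1.jsonl", "w", encoding="utf-8") as dst:
    for record in json.load(src):
        dst.write(json.dumps(record, ensure_ascii=False) + "\n")

dataset = load_dataset("json", data_files=["1.jsonl"])
```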
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5913/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5913/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5912
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5912/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5912/comments
https://api.github.com/repos/huggingface/datasets/issues/5912/events
https://github.com/huggingface/datasets/issues/5912
1,730,299,852
I_kwDODunzps5nIkfM
5,912
Missing elements in `map` a batched dataset
{ "login": "sachinruk", "id": 1410927, "node_id": "MDQ6VXNlcjE0MTA5Mjc=", "avatar_url": "https://avatars.githubusercontent.com/u/1410927?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinruk", "html_url": "https://github.com/sachinruk", "followers_url": "https://api.github.com/users/sachinruk/followers", "following_url": "https://api.github.com/users/sachinruk/following{/other_user}", "gists_url": "https://api.github.com/users/sachinruk/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinruk/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinruk/subscriptions", "organizations_url": "https://api.github.com/users/sachinruk/orgs", "repos_url": "https://api.github.com/users/sachinruk/repos", "events_url": "https://api.github.com/users/sachinruk/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinruk/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi ! in your code batching is **only used within** `map`, to process examples in batch. The dataset itself however is not batched and returns elements one by one.\r\n\r\nTo iterate on batches, you can do\r\n```python\r\nfor batch in dataset.iter(batch_size=8):\r\n ...\r\n```" ]
2023-05-29T08:09:19
2023-07-26T15:48:15
2023-07-26T15:48:15
NONE
null
null
null
### Describe the bug As outlined [here](https://discuss.huggingface.co/t/length-error-using-map-with-datasets/40969/3?u=sachin), the following collate function drops 5 out of possible 6 elements in the batch (it is 6 because out of the eight, two are bad links in laion). A reproducible [kaggle kernel ](https://www.kaggle.com/sachin/laion-hf-dataset/edit) can be found here. The weirdest part is when inspecting the sizes of the tensors as shown below, both `tokenized_captions["input_ids"]` and `image_features` show the correct shapes. Simply the output only has one element (with the batch dimension squeezed out). ```python class CollateFn: def get_image(self, url): try: response = requests.get(url) return Image.open(io.BytesIO(response.content)).convert("RGB") except PIL.UnidentifiedImageError: logger.info(f"Reading error: Could not transform f{url}") return None except requests.exceptions.ConnectionError: logger.info(f"Connection error: Could not transform f{url}") return None def __call__(self, batch): images = [self.get_image(url) for url in batch["url"]] captions = [caption for caption, image in zip(batch["caption"], images) if image is not None] images = [image for image in images if image is not None] tokenized_captions = tokenizer( captions, padding="max_length", truncation=True, max_length=tokenizer.model_max_length, return_tensors="pt", ) image_features = torch.stack([torch.Tensor(feature_extractor(image)["pixel_values"][0]) for image in images]) # import pdb; pdb.set_trace() return {"input_ids": tokenized_captions["input_ids"], "images": image_features} collate_fn = CollateFn() laion_ds = datasets.load_dataset("laion/laion400m", split="train", streaming=True) laion_ds_batched = laion_ds.map(collate_fn, batched=True, batch_size=8, remove_columns=next(iter(laion_ds)).keys()) ``` ### Steps to reproduce the bug A reproducible [kaggle kernel ](https://www.kaggle.com/sachin/laion-hf-dataset/edit) can be found here. ### Expected behavior Would expect `next(iter(laion_ds_batched))` to produce two tensors of shape `(batch_size, 77)` and `batch_size, image_shape`. ### Environment info datasets==2.12.0 python==3.10
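As the reply above explains, `batched=True` only controls how examples are passed to the map function; the resulting dataset still yields one example at a time, which is why a single element looks like a squeezed batch. A minimal sketch of consuming the mapped stream in batches, assuming you have access to the laion/laion400m repo:

```python
from datasets import load_dataset

laion_ds = load_dataset("laion/laion400m", split="train", streaming=True)

# The dataset yields single examples; to consume batches, iterate with an explicit batch size
for batch in laion_ds.iter(batch_size=8):
    print(len(batch["url"]))  # 8 urls per batch (fewer in the last one)
    break
```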
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5912/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5912/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5910
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5910/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5910/comments
https://api.github.com/repos/huggingface/datasets/issues/5910/events
https://github.com/huggingface/datasets/issues/5910
1,728,909,790
I_kwDODunzps5nDRHe
5,910
Cannot use both set_format and set_transform
{ "login": "ybouane", "id": 14046002, "node_id": "MDQ6VXNlcjE0MDQ2MDAy", "avatar_url": "https://avatars.githubusercontent.com/u/14046002?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ybouane", "html_url": "https://github.com/ybouane", "followers_url": "https://api.github.com/users/ybouane/followers", "following_url": "https://api.github.com/users/ybouane/following{/other_user}", "gists_url": "https://api.github.com/users/ybouane/gists{/gist_id}", "starred_url": "https://api.github.com/users/ybouane/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ybouane/subscriptions", "organizations_url": "https://api.github.com/users/ybouane/orgs", "repos_url": "https://api.github.com/users/ybouane/repos", "events_url": "https://api.github.com/users/ybouane/events{/privacy}", "received_events_url": "https://api.github.com/users/ybouane/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Currently, it's not possible to chain `set_format`/`set_transform` calls (plus, this is a breaking change if we decide to implement it), so I see two possible solutions:\r\n* using `set_format`/`set_transform` for the 1st transform and then passing the transformed example/batch to the 2nd transform\r\n* implementing and registering a custom formatter (the relevant code is [here](https://github.com/huggingface/datasets/tree/main/src/datasets/formatting))\r\n\r\nBtw, your example requires a single `set_format` call:\r\n```python\r\nds.set_format(\"torch\", columns=[\"image\"], output_all_columns=True, dtype=torch.double)\r\n```", "Hey Mario,\r\nThanks, for getting back to me. the toDouble was just an example my real life case requires many more transforms.\r\n\r\nWhat do you mean by:\r\n> using set_format/set_transform for the 1st transform and then passing the transformed example/batch to the 2nd transform\r\n\r\nHow would that go, I thought you can't chain them?\r\n\r\nAs for the custom formatter, is it possible to reference an existing formatter, in my case `torch_formatter` inside of my custom formatter?\r\n\r\nmaybe I can inherit from it and just call `super.recursive_tensorize()`?", "> How would that go, I thought you can't chain them?\r\n\r\nYes, they cannot be chained. This is what I meant:\r\n```python\r\nds.set_transform(first_transform)\r\n# calling the 2nd transform on each accessed batch\r\nsecond_transform(ds[2:3])\r\n```\r\n\r\n> As for the custom formatter, is it possible to reference an existing formatter, in my case torch_formatter inside of my custom formatter?\r\n>\r\n>maybe I can inherit from it and just call super.recursive_tensorize()?\r\n\r\nYes, subclassing makes the most sense.", "Great, thank you for the details.", "https://github.com/huggingface/datasets/issues/6012" ]
2023-05-27T19:22:23
2023-07-09T21:40:54
2023-06-16T14:41:24
NONE
null
null
null
### Describe the bug I need to process some data using the set_transform method but I also need the data to be formatted for pytorch before processing it. I don't see anywhere in the documentation something that says that both methods cannot be used at the same time. ### Steps to reproduce the bug ``` from datasets import load_dataset ds = load_dataset("mnist", split="train") ds.set_format(type="torch") def transform(entry): return entry["image"].double() ds.set_transform(transform) print(ds[0]) ``` ### Expected behavior It should print the pytorch tensor image as a double, but it errors because "entry" in the transform function doesn't receive a pytorch tensor to begin with, it receives a PIL Image -> entry.double() errors because entry isn't a pytorch tensor. ### Environment info Latest versions. ### Note: It would be at least handy to have access to a function that can do the dataset.set_format in the set_transform function. Something like: ``` from datasets import load_dataset, do_format ds = load_dataset("mnist", split="train") def transform(entry): entry = do_format(entry, type="torch") return entry["image"].double() ds.set_transform(transform) print(ds[0]) ```
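A minimal sketch of the single-transform workaround described in the comments above: since `set_format` and `set_transform` cannot be chained, do the tensor conversion inside the transform itself. This assumes numpy and torch are installed and is one possible approach, not the only one.

```python
import numpy as np
import torch
from datasets import load_dataset

ds = load_dataset("mnist", split="train")

def transform(batch):
    # set_transform receives a batch (dict of lists), so convert each PIL image here
    batch["image"] = [torch.from_numpy(np.array(img)).double() for img in batch["image"]]
    return batch

ds.set_transform(transform)
print(ds[0]["image"].dtype)  # torch.float64
```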
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5910/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5910/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5908
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5908/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5908/comments
https://api.github.com/repos/huggingface/datasets/issues/5908/events
https://github.com/huggingface/datasets/issues/5908
1,728,653,935
I_kwDODunzps5nCSpv
5,908
Unbearably slow sorting on big mapped datasets
{ "login": "maximxlss", "id": 29152154, "node_id": "MDQ6VXNlcjI5MTUyMTU0", "avatar_url": "https://avatars.githubusercontent.com/u/29152154?v=4", "gravatar_id": "", "url": "https://api.github.com/users/maximxlss", "html_url": "https://github.com/maximxlss", "followers_url": "https://api.github.com/users/maximxlss/followers", "following_url": "https://api.github.com/users/maximxlss/following{/other_user}", "gists_url": "https://api.github.com/users/maximxlss/gists{/gist_id}", "starred_url": "https://api.github.com/users/maximxlss/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/maximxlss/subscriptions", "organizations_url": "https://api.github.com/users/maximxlss/orgs", "repos_url": "https://api.github.com/users/maximxlss/repos", "events_url": "https://api.github.com/users/maximxlss/events{/privacy}", "received_events_url": "https://api.github.com/users/maximxlss/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[ "Hi ! `shard` currently returns a slow dataset by default, with examples evenly distributed in the dataset.\r\n\r\nYou can get a fast dataset using `contiguous=True` (which should be the default imo):\r\n\r\n```python\r\ndataset = dataset.shard(10, 0, contiguous=True)\r\n```\r\n\r\nThis way you don't need to flatten_indices() and sort should be fast as well", "@lhoestq \r\n\r\n> contiguous=True (which should be the default imo)\r\n\r\nFor `IterableDataset`, it's not possible to implement contiguous sharding without knowing the number of examples in advance, so setting the default value to `contiguous=True` would result in an inconsistency between `Dataset` and `IterableDataset` (when we add `IterableDataset.shard`)", "Actually sharded iterable datasets are made of sub iterables that generally yield contiguous data no ? So in a way it's possible to shard an iterable dataset contiguously.\r\n\r\nIf the dataset is made of one shard it's indeed not possible to shard it contiguously though", "> Actually sharded iterable datasets are made of sub iterables that generally yield contiguous data no ? So in a way it's possible to shard an iterable dataset contiguously.\r\n\r\nBut sharding an iterable dataset by sharding its `gen_kwargs` would still yield approximate shards(not equal to `Dataset.shard`), no? ", "Yes indeed !", "I understand the issue doesn't exist with non-mapped datasets, but if flattening is so much more efficient than sorting the indices, that's an issue in itself.\n\nThere are plenty of issues people posted for which the root cause turns out to be the same. It seems like mapped datasets are terribly inefficient. I think I saw some issue like that somewhere (about the mapped datasets in general), but can't find it now.\n\nMaybe indices should be flattened before any additional processing, then." ]
2023-05-27T11:08:32
2023-06-13T17:45:10
null
CONTRIBUTOR
null
null
null
### Describe the bug For me, with ~40k lines, sorting took 3.5 seconds on a flattened dataset (including the flatten operation) and 22.7 seconds on a mapped dataset (right after sharding), which is about a 5x slowdown. Moreover, it seems to slow down exponentially with bigger datasets (I wasn't able to sort 700k lines at all, while with flattening it takes about a minute). ### Steps to reproduce the bug ```Python from datasets import load_dataset import time dataset = load_dataset("xnli", "en", split="train") dataset = dataset.shard(10, 0) print(len(dataset)) t = time.time() # dataset = dataset.flatten_indices() # uncomment this line and it's fast dataset = dataset.sort("label", reverse=True, load_from_cache_file=False) print(f"finished in {time.time() - t:.4f} seconds") ``` ### Expected behavior Sorting should take the same or less time than flattening and then sorting. ### Environment info - `datasets` version: 2.12.1.dev0 (same with 2.12.0 too) - Platform: Windows-10-10.0.22621-SP0 - Python version: 3.10.10 - Huggingface_hub version: 0.14.1 - PyArrow version: 12.0.0 - Pandas version: 2.0.1
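For reference, the workaround from the maintainer's comment above, sketched on the same snippet: requesting a contiguous shard avoids the scattered indices mapping that makes the subsequent sort slow.

```python
import time

from datasets import load_dataset

dataset = load_dataset("xnli", "en", split="train")
dataset = dataset.shard(10, 0, contiguous=True)  # contiguous shard -> no slow indices mapping

t = time.time()
dataset = dataset.sort("label", reverse=True, load_from_cache_file=False)
print(f"finished in {time.time() - t:.4f} seconds")
```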
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5908/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5908/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5906
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5906/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5906/comments
https://api.github.com/repos/huggingface/datasets/issues/5906/events
https://github.com/huggingface/datasets/issues/5906
1,728,171,113
I_kwDODunzps5nAcxp
5,906
Could you unpin responses version?
{ "login": "kenimou", "id": 47789026, "node_id": "MDQ6VXNlcjQ3Nzg5MDI2", "avatar_url": "https://avatars.githubusercontent.com/u/47789026?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kenimou", "html_url": "https://github.com/kenimou", "followers_url": "https://api.github.com/users/kenimou/followers", "following_url": "https://api.github.com/users/kenimou/following{/other_user}", "gists_url": "https://api.github.com/users/kenimou/gists{/gist_id}", "starred_url": "https://api.github.com/users/kenimou/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kenimou/subscriptions", "organizations_url": "https://api.github.com/users/kenimou/orgs", "repos_url": "https://api.github.com/users/kenimou/repos", "events_url": "https://api.github.com/users/kenimou/events{/privacy}", "received_events_url": "https://api.github.com/users/kenimou/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2023-05-26T20:02:14
2023-05-30T17:53:31
2023-05-30T17:53:31
NONE
null
null
null
### Describe the bug Could you unpin [this](https://github.com/huggingface/datasets/blob/main/setup.py#L139) or move it to the test requirements? `responses` is a testing library that we also use for our own tests, and we do not want to use a very outdated version. ### Steps to reproduce the bug Could not install this library due to a dependency conflict. ### Expected behavior Can install datasets. ### Environment info linux 64
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5906/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5906/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5905
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5905/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5905/comments
https://api.github.com/repos/huggingface/datasets/issues/5905/events
https://github.com/huggingface/datasets/issues/5905
1,727,541,392
I_kwDODunzps5m-DCQ
5,905
Offer an alternative to Iterable Dataset that allows lazy loading and processing while skipping batches efficiently
{ "login": "Hubert-Bonisseur", "id": 48770768, "node_id": "MDQ6VXNlcjQ4NzcwNzY4", "avatar_url": "https://avatars.githubusercontent.com/u/48770768?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Hubert-Bonisseur", "html_url": "https://github.com/Hubert-Bonisseur", "followers_url": "https://api.github.com/users/Hubert-Bonisseur/followers", "following_url": "https://api.github.com/users/Hubert-Bonisseur/following{/other_user}", "gists_url": "https://api.github.com/users/Hubert-Bonisseur/gists{/gist_id}", "starred_url": "https://api.github.com/users/Hubert-Bonisseur/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Hubert-Bonisseur/subscriptions", "organizations_url": "https://api.github.com/users/Hubert-Bonisseur/orgs", "repos_url": "https://api.github.com/users/Hubert-Bonisseur/repos", "events_url": "https://api.github.com/users/Hubert-Bonisseur/events{/privacy}", "received_events_url": "https://api.github.com/users/Hubert-Bonisseur/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[ "We plan to improve this eventually (see https://github.com/huggingface/datasets/issues/5454 and https://github.com/huggingface/datasets/issues/5380).\r\n\r\n> Is it possible to lazily load samples of a mapped dataset ? I'm used to [dataset scripts](https://huggingface.co/docs/datasets/dataset_script), maybe something can be done there.\r\nIf not, I could do it using a plain Pytorch dataset. Then I would need to convert it to a datasets' dataset to get all the features of datasets. Is it something possible ?\r\n\r\nYes, by creating a mapped dataset that stores audio URLs. Indexing a dataset in such format only downloads and decodes the bytes of the accessed samples (without storing them on disk).\r\n\r\nYou can do the following to create this dataset:\r\n```python\r\n\r\ndef gen():\r\n # Generator that yields (audio URL, text) pairs as dict\r\n ...\r\n yield {\"audio\": \"audio_url\", \"text\": \"some text\"}\r\n\r\nfeatures = Features({\"audio\": datasets.Audio(), \"text\": datasets.Value(\"string\")})\r\nds = Dataset.from_generator(gen, features=features)\r\nds[2:5] # downloads and decodes the samples each time they are accessed\r\n```" ]
2023-05-26T12:33:02
2023-06-15T13:34:18
null
CONTRIBUTOR
null
null
null
### Feature request I would like a way to resume training from a checkpoint without waiting for a very long time when using an iterable dataset. ### Motivation I am training models on the speech-recognition task. I have very large datasets that I can't comfortably store on a disk, and also quite computationally intensive audio processing to do. As a result, I want to load data from my remote storage when it is needed and perform all processing on the fly. I am currently using the iterable dataset feature of _datasets_. It does everything I need, with one exception. My issue is that when resuming training at a step n, we have to download all the data and perform the processing of steps < n, just to get the iterable to the right step. In my case this takes almost as long as training for the same steps, which makes resuming training from a checkpoint useless in practice. I understand that the nature of iterators probably makes it nearly impossible to quickly resume training. I thought about a possible solution nonetheless: I could in fact index my large dataset and make it a mapped dataset. Then I could use set_transform to perform the processing on the fly. Finally, if I'm not mistaken, the _accelerate_ package allows [skipping steps efficiently](https://github.com/huggingface/accelerate/blob/a73898027a211c3f6dc4460351b0ec246aa824aa/src/accelerate/data_loader.py#L827) for a mapped dataset. Is it possible to lazily load samples of a mapped dataset? I'm used to [dataset scripts](https://huggingface.co/docs/datasets/dataset_script), so maybe something can be done there. If not, I could do it using a plain _Pytorch_ dataset. Then I would need to convert it to a _datasets_' dataset to get all the features of _datasets_. Is that possible? ### Your contribution I could provide a PR to allow lazy loading of mapped datasets, or the conversion of a mapped _Pytorch_ dataset into a _Datasets_ dataset, if you think it is a useful new feature.
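A small sketch of the lazily-decoded mapped dataset suggested in the comments above: the generator only yields audio URLs (placeholders here) and text, and the `Audio` feature defers downloading and decoding until a row is actually accessed.

```python
from datasets import Audio, Dataset, Features, Value

def gen():
    # Nothing is downloaded at build time; only URLs/paths and text are stored
    for i in range(10):
        yield {"audio": f"https://example.com/clip_{i}.wav", "text": f"transcript {i}"}  # placeholder URLs

features = Features({"audio": Audio(sampling_rate=16_000), "text": Value("string")})
ds = Dataset.from_generator(gen, features=features)
print(ds.features)
# With real URLs, ds[2:5] downloads and decodes just those samples when accessed
```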
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5905/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5905/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5898
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5898/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5898/comments
https://api.github.com/repos/huggingface/datasets/issues/5898/events
https://github.com/huggingface/datasets/issues/5898
1,726,190,481
I_kwDODunzps5m45OR
5,898
Loading the FLORES dataset for a specific language
{ "login": "106AbdulBasit", "id": 36159918, "node_id": "MDQ6VXNlcjM2MTU5OTE4", "avatar_url": "https://avatars.githubusercontent.com/u/36159918?v=4", "gravatar_id": "", "url": "https://api.github.com/users/106AbdulBasit", "html_url": "https://github.com/106AbdulBasit", "followers_url": "https://api.github.com/users/106AbdulBasit/followers", "following_url": "https://api.github.com/users/106AbdulBasit/following{/other_user}", "gists_url": "https://api.github.com/users/106AbdulBasit/gists{/gist_id}", "starred_url": "https://api.github.com/users/106AbdulBasit/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/106AbdulBasit/subscriptions", "organizations_url": "https://api.github.com/users/106AbdulBasit/orgs", "repos_url": "https://api.github.com/users/106AbdulBasit/repos", "events_url": "https://api.github.com/users/106AbdulBasit/events{/privacy}", "received_events_url": "https://api.github.com/users/106AbdulBasit/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "got that the syntax is like this\r\n\r\ndataset = load_dataset(\"facebook/flores\", \"ace_Arab\")" ]
2023-05-25T17:08:55
2023-05-25T17:21:38
2023-05-25T17:21:37
NONE
null
null
null
### Describe the bug I am trying to load the FLORES dataset. The code given is ``` from datasets import load_dataset dataset = load_dataset("facebook/flores") ``` This gives a config-name error: "ValueError: Config name is missing". Now, if I add a config, I get the error "HFValidationError: Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are forbidden, '-' and '.' cannot start or end the name, max length is 96: 'facebook/flores, 'ace_Arab''. " How can I load the data for a specific language? I couldn't find any tutorial; can anyone help me out? ### Steps to reproduce the bug Step one: load the dataset with `from datasets import load_dataset dataset = load_dataset("facebook/flores")`; it gives the missing-config error. Once a config is given, it gives the error "HFValidationError: Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are forbidden, '-' and '.' cannot start or end the name, max length is 96: 'facebook/flores, 'ace_Arab''. " ### Expected behavior The dataset should load, but I am receiving an error. ### Environment info Datasets, Python
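For completeness, the working call from the author's follow-up comment above, as a short sketch: the language code is passed as the configuration name, not as part of the repo id.

```python
from datasets import load_dataset

# The second positional argument selects the language configuration
dataset = load_dataset("facebook/flores", "ace_Arab")
print(dataset)  # shows the available splits, depending on the dataset script
```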
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5898/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5898/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5896
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5896/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5896/comments
https://api.github.com/repos/huggingface/datasets/issues/5896/events
https://github.com/huggingface/datasets/issues/5896
1,726,022,500
I_kwDODunzps5m4QNk
5,896
HuggingFace does not cache downloaded files aggressively/early enough
{ "login": "geajack", "id": 2124157, "node_id": "MDQ6VXNlcjIxMjQxNTc=", "avatar_url": "https://avatars.githubusercontent.com/u/2124157?v=4", "gravatar_id": "", "url": "https://api.github.com/users/geajack", "html_url": "https://github.com/geajack", "followers_url": "https://api.github.com/users/geajack/followers", "following_url": "https://api.github.com/users/geajack/following{/other_user}", "gists_url": "https://api.github.com/users/geajack/gists{/gist_id}", "starred_url": "https://api.github.com/users/geajack/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/geajack/subscriptions", "organizations_url": "https://api.github.com/users/geajack/orgs", "repos_url": "https://api.github.com/users/geajack/repos", "events_url": "https://api.github.com/users/geajack/events{/privacy}", "received_events_url": "https://api.github.com/users/geajack/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "I also faced this. Any update?", "We've dropped the `apache-beam` dependency in https://huggingface.co/datasets/wikipedia/discussions/19, so you should no longer get this error." ]
2023-05-25T15:14:36
2024-03-15T15:36:07
2024-03-15T15:36:07
NONE
null
null
null
### Describe the bug I wrote the following script: ``` import datasets dataset = datasets.load.load_dataset("wikipedia", "20220301.en", split="train[:10000]") ``` I ran it and spent 90 minutes downloading a 20GB file. Then I saw: ``` Downloading: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 20.3G/20.3G [1:30:29<00:00, 3.73MB/s] Traceback (most recent call last): File "/home/jack/Code/Projects/Transformers/Codebase/main.py", line 5, in <module> dataset = datasets.load.load_dataset("wikipedia", "20220301.en", split="train[:10000]") File "/home/jack/.local/lib/python3.10/site-packages/datasets/load.py", line 1782, in load_dataset builder_instance.download_and_prepare( File "/home/jack/.local/lib/python3.10/site-packages/datasets/builder.py", line 883, in download_and_prepare self._save_info() File "/home/jack/.local/lib/python3.10/site-packages/datasets/builder.py", line 2037, in _save_info import apache_beam as beam ModuleNotFoundError: No module named 'apache_beam' ``` And the 20GB of data was seemingly instantly gone forever, because when I ran the script again, it had to do the download again. ### Steps to reproduce the bug See above ### Expected behavior See above ### Environment info datasets 2.10.1 Python 3.10
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5896/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5896/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5895
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5895/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5895/comments
https://api.github.com/repos/huggingface/datasets/issues/5895/events
https://github.com/huggingface/datasets/issues/5895
1,725,467,252
I_kwDODunzps5m2Ip0
5,895
The dir name and split strings are confused when loading ArmelR/stack-exchange-instruction dataset
{ "login": "DongHande", "id": 45357817, "node_id": "MDQ6VXNlcjQ1MzU3ODE3", "avatar_url": "https://avatars.githubusercontent.com/u/45357817?v=4", "gravatar_id": "", "url": "https://api.github.com/users/DongHande", "html_url": "https://github.com/DongHande", "followers_url": "https://api.github.com/users/DongHande/followers", "following_url": "https://api.github.com/users/DongHande/following{/other_user}", "gists_url": "https://api.github.com/users/DongHande/gists{/gist_id}", "starred_url": "https://api.github.com/users/DongHande/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/DongHande/subscriptions", "organizations_url": "https://api.github.com/users/DongHande/orgs", "repos_url": "https://api.github.com/users/DongHande/repos", "events_url": "https://api.github.com/users/DongHande/events{/privacy}", "received_events_url": "https://api.github.com/users/DongHande/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Thanks for reporting, @DongHande.\r\n\r\nI think the issue is caused by the metadata in the dataset card: in the header of the `README.md`, they state that the dataset has 4 splits (\"finetune\", \"reward\", \"rl\", \"evaluation\"). \r\n```yaml\r\n splits:\r\n - name: finetune\r\n num_bytes: 6674567576\r\n num_examples: 3000000\r\n - name: reward\r\n num_bytes: 6674341521\r\n num_examples: 3000000\r\n - name: rl\r\n num_bytes: 6679279968\r\n num_examples: 3000000\r\n - name: evaluation\r\n num_bytes: 4022714493\r\n num_examples: 1807695\r\n```\r\n\r\n\r\nI guess the user wanted to define these as configs, instead of splits. This is not yet supported for no-script datasets, but will be soon supported. See:\r\n- #5331\r\n\r\nI think we should contact the dataset author to inform about the issue with the split names, as you already did: https://huggingface.co/datasets/ArmelR/stack-exchange-instruction/discussions/1\r\nLet's continue the discussion there!", "Thank you! It has been fixed. " ]
2023-05-25T09:39:06
2023-05-29T02:32:12
2023-05-29T02:32:12
NONE
null
null
null
### Describe the bug When I load the ArmelR/stack-exchange-instruction dataset, I encounter a bug that may be raised by confusing the dir name string and the split string about the dataset. When I use the script "datasets.load_dataset('ArmelR/stack-exchange-instruction', data_dir="data/finetune", split="train", use_auth_token=True)", it fails. But it succeeds when I add the "streaming = True" parameter. The website of the dataset is https://huggingface.co/datasets/ArmelR/stack-exchange-instruction/ . The traceback logs are as below: Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/xxx/miniconda3/envs/code/lib/python3.9/site-packages/datasets/load.py", line 1797, in load_dataset builder_instance.download_and_prepare( File "/home/xxx/miniconda3/envs/code/lib/python3.9/site-packages/datasets/builder.py", line 890, in download_and_prepare self._download_and_prepare( File "/home/xxx/miniconda3/envs/code/lib/python3.9/site-packages/datasets/builder.py", line 985, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/home/xxx/miniconda3/envs/code/lib/python3.9/site-packages/datasets/builder.py", line 1706, in _prepare_split split_info = self.info.splits[split_generator.name] File "/home/xxx/miniconda3/envs/code/lib/python3.9/site-packages/datasets/splits.py", line 530, in __getitem__ instructions = make_file_instructions( File "/home/xxx/miniconda3/envs/code/lib/python3.9/site-packages/datasets/arrow_reader.py", line 112, in make_file_instructions name2filenames = { File "/home/xxx/miniconda3/envs/code/lib/python3.9/site-packages/datasets/arrow_reader.py", line 113, in <dictcomp> info.name: filenames_for_dataset_split( File "/home/xxx/miniconda3/envs/code/lib/python3.9/site-packages/datasets/naming.py", line 70, in filenames_for_dataset_split prefix = filename_prefix_for_split(dataset_name, split) File "/home/xxx/miniconda3/envs/code/lib/python3.9/site-packages/datasets/naming.py", line 54, in filename_prefix_for_split if os.path.basename(name) != name: File "/home/xxx/miniconda3/envs/code/lib/python3.9/posixpath.py", line 142, in basename p = os.fspath(p) TypeError: expected str, bytes or os.PathLike object, not NoneType ### Steps to reproduce the bug 1. import datasets library function: ```from datasets import load_dataset``` 2. load dataset: ```ds=load_dataset('ArmelR/stack-exchange-instruction', data_dir="data/finetune", split="train", use_auth_token=True)``` ### Expected behavior The dataset can be loaded successfully without the streaming setting. ### Environment info Linux, python=3.9 datasets=2.12.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5895/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5895/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5892
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5892/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5892/comments
https://api.github.com/repos/huggingface/datasets/issues/5892/events
https://github.com/huggingface/datasets/issues/5892
1,722,503,824
I_kwDODunzps5mq1KQ
5,892
User access requests with manual review do not notify the dataset owner
{ "login": "leondz", "id": 121934, "node_id": "MDQ6VXNlcjEyMTkzNA==", "avatar_url": "https://avatars.githubusercontent.com/u/121934?v=4", "gravatar_id": "", "url": "https://api.github.com/users/leondz", "html_url": "https://github.com/leondz", "followers_url": "https://api.github.com/users/leondz/followers", "following_url": "https://api.github.com/users/leondz/following{/other_user}", "gists_url": "https://api.github.com/users/leondz/gists{/gist_id}", "starred_url": "https://api.github.com/users/leondz/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/leondz/subscriptions", "organizations_url": "https://api.github.com/users/leondz/orgs", "repos_url": "https://api.github.com/users/leondz/repos", "events_url": "https://api.github.com/users/leondz/events{/privacy}", "received_events_url": "https://api.github.com/users/leondz/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "cc @SBrandeis", "I think this has been addressed.\r\n\r\nPlease open a new issue if you are still not getting notified." ]
2023-05-23T17:27:46
2023-07-21T13:55:37
2023-07-21T13:55:36
CONTRIBUTOR
null
null
null
### Describe the bug When a user access requests are enabled, and new requests are set to Manual Review, the dataset owner should be notified of the pending requests. However, instead, currently nothing happens, and so the dataset request can go unanswered for quite some time until the owner happens to check that particular dataset's Settings pane. ### Steps to reproduce the bug 1. Enable a dataset's user access requests 2. Set to Manual Review 3. Ask another HF user to request access to the dataset 4. Dataset owner is not notified ### Expected behavior The dataset owner should receive some kind of notification, perhaps in their HF site inbox, or by email, when a dataset access request is made and manual review is enabled. ### Environment info n/a
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5892/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5892/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5889
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5889/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5889/comments
https://api.github.com/repos/huggingface/datasets/issues/5889/events
https://github.com/huggingface/datasets/issues/5889
1,722,373,618
I_kwDODunzps5mqVXy
5,889
Token Alignment for input and output data over train and test batch/dataset.
{ "login": "akesh1235", "id": 125154243, "node_id": "U_kgDOB3Wzww", "avatar_url": "https://avatars.githubusercontent.com/u/125154243?v=4", "gravatar_id": "", "url": "https://api.github.com/users/akesh1235", "html_url": "https://github.com/akesh1235", "followers_url": "https://api.github.com/users/akesh1235/followers", "following_url": "https://api.github.com/users/akesh1235/following{/other_user}", "gists_url": "https://api.github.com/users/akesh1235/gists{/gist_id}", "starred_url": "https://api.github.com/users/akesh1235/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/akesh1235/subscriptions", "organizations_url": "https://api.github.com/users/akesh1235/orgs", "repos_url": "https://api.github.com/users/akesh1235/repos", "events_url": "https://api.github.com/users/akesh1235/events{/privacy}", "received_events_url": "https://api.github.com/users/akesh1235/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[]
2023-05-23T15:58:55
2023-05-23T15:58:55
null
NONE
null
null
null
`data` > DatasetDict({ train: Dataset({ features: ['input', 'output'], num_rows: 4500 }) test: Dataset({ features: ['input', 'output'], num_rows: 500 }) }) **# input (incorrect sentence)** `data['train'][0]['input']` **>>** 'We are meet sunday 10am12pmET in Crown Heights Brooklyn New York' **# output (correct sentence)** `data['train'][0]['output']` **>>** 'We meet Sundays 10am-12pmET in Crown Heights, Brooklyn, New York.' **I want to align the output tokens with the input tokens.** ``` # tokenize both inputs and targets def tokenize_fn(batch): # tokenize the input sequence first # this populates input_ids, attention_mask, etc. tokenized_inputs = tokenizer( batch['input'] ) labels_batch = tokenizer.tokenize(batch['output']) # original targets aligned_labels_batch = [] for i, labels in enumerate(labels_batch): word_ids = tokenized_inputs[i].word_ids() aligned_labels_batch.append(align_targets(labels, word_ids)) # align_targets is another user-defined function that is called here # recall: the target must be stored in a key called 'labels' tokenized_inputs['labels'] = aligned_labels_batch return tokenized_inputs ``` ``` data.map( tokenize_fn, batched=True, remove_columns=data['train'].column_names, ) ``` When this user-defined function is mapped over every record of the train and test splits, I get the following errors: **1.** **raise DatasetTransformationNotAllowedError( 3457 "Using `.map` in batched mode on a dataset with attached indexes is allowed only if it doesn't create or remove existing examples. You can first run `.drop_index() to remove your index and then re-add it."** **2.** **TypeError: TextEncodeInput must be Union[TextInputSequence, Tuple[InputSequence, InputSequence]]**
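The second error usually comes from passing the whole list of targets to `tokenizer.tokenize`, which expects a single string. Below is a minimal sketch of the per-example loop; the checkpoint and the sample sentences are only illustrative, and `align_targets` is the user's own function, so it is left out here.

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")  # any fast tokenizer; assumed checkpoint

batch = {
    "input": ["We are meet sunday 10am12pmET in Crown Heights Brooklyn New York"],
    "output": ["We meet Sundays 10am-12pmET in Crown Heights, Brooklyn, New York."],
}

tokenized_inputs = tokenizer(batch["input"], truncation=True)

for i, target in enumerate(batch["output"]):
    target_tokens = tokenizer.tokenize(target)            # one string at a time, not the whole list
    word_ids = tokenized_inputs.word_ids(batch_index=i)   # maps input sub-tokens back to words
    print(target_tokens)
    print(word_ids)
    # align_targets(target_tokens, word_ids) would go here
```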
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5889/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5889/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5887
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5887/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5887/comments
https://api.github.com/repos/huggingface/datasets/issues/5887/events
https://github.com/huggingface/datasets/issues/5887
1,722,166,382
I_kwDODunzps5mpixu
5,887
HuggingFace dataset example gives an error
{ "login": "donhuvy", "id": 1328316, "node_id": "MDQ6VXNlcjEzMjgzMTY=", "avatar_url": "https://avatars.githubusercontent.com/u/1328316?v=4", "gravatar_id": "", "url": "https://api.github.com/users/donhuvy", "html_url": "https://github.com/donhuvy", "followers_url": "https://api.github.com/users/donhuvy/followers", "following_url": "https://api.github.com/users/donhuvy/following{/other_user}", "gists_url": "https://api.github.com/users/donhuvy/gists{/gist_id}", "starred_url": "https://api.github.com/users/donhuvy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/donhuvy/subscriptions", "organizations_url": "https://api.github.com/users/donhuvy/orgs", "repos_url": "https://api.github.com/users/donhuvy/repos", "events_url": "https://api.github.com/users/donhuvy/events{/privacy}", "received_events_url": "https://api.github.com/users/donhuvy/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "alvarobartt", "id": 36760800, "node_id": "MDQ6VXNlcjM2NzYwODAw", "avatar_url": "https://avatars.githubusercontent.com/u/36760800?v=4", "gravatar_id": "", "url": "https://api.github.com/users/alvarobartt", "html_url": "https://github.com/alvarobartt", "followers_url": "https://api.github.com/users/alvarobartt/followers", "following_url": "https://api.github.com/users/alvarobartt/following{/other_user}", "gists_url": "https://api.github.com/users/alvarobartt/gists{/gist_id}", "starred_url": "https://api.github.com/users/alvarobartt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alvarobartt/subscriptions", "organizations_url": "https://api.github.com/users/alvarobartt/orgs", "repos_url": "https://api.github.com/users/alvarobartt/repos", "events_url": "https://api.github.com/users/alvarobartt/events{/privacy}", "received_events_url": "https://api.github.com/users/alvarobartt/received_events", "type": "User", "site_admin": false }
[ { "login": "alvarobartt", "id": 36760800, "node_id": "MDQ6VXNlcjM2NzYwODAw", "avatar_url": "https://avatars.githubusercontent.com/u/36760800?v=4", "gravatar_id": "", "url": "https://api.github.com/users/alvarobartt", "html_url": "https://github.com/alvarobartt", "followers_url": "https://api.github.com/users/alvarobartt/followers", "following_url": "https://api.github.com/users/alvarobartt/following{/other_user}", "gists_url": "https://api.github.com/users/alvarobartt/gists{/gist_id}", "starred_url": "https://api.github.com/users/alvarobartt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alvarobartt/subscriptions", "organizations_url": "https://api.github.com/users/alvarobartt/orgs", "repos_url": "https://api.github.com/users/alvarobartt/repos", "events_url": "https://api.github.com/users/alvarobartt/events{/privacy}", "received_events_url": "https://api.github.com/users/alvarobartt/received_events", "type": "User", "site_admin": false } ]
null
[ "Nice catch @donhuvy, that's because some models don't need the `token_type_ids`, as in this case, as the example is using `distilbert-base-cased`, and according to the DistilBert documentation at https://huggingface.co/transformers/v3.0.2/model_doc/distilbert.html, `DistilBert doesn’t have token_type_ids, you don’t need to indicate which token belongs to which segment. Just separate your segments with the separation token tokenizer.sep_token (or [SEP])`. `token_type_ids` are neither required in some other well known models such as RoBERTa. \r\n\r\nHere the issue comes due to a mismatch between the tokenizer and the model, as the Colab is using a BERT tokenizer (`bert-base-cased`), while the model is a DistilBERT (`distilbert-base-cased`), so aligning the tokenizer and the model solves it!", "#self-assign", "@donhuvy I've created https://github.com/huggingface/datasets/pull/5902 to solve it! 🤗", "This has been addressed in #5902.\r\n\r\nThe Quicktour notebook is deprecated now - please use the notebook version of the [Quickstart doc page](https://huggingface.co/docs/datasets/main/en/quickstart) instead (\"Open in Colab\" button)." ]
2023-05-23T14:09:05
2023-07-25T14:01:01
2023-07-25T14:01:00
NONE
null
null
null
### Describe the bug ![image](https://github.com/huggingface/datasets/assets/1328316/1f4f0086-3db9-4c79-906b-05a375357cce) ![image](https://github.com/huggingface/datasets/assets/1328316/733ebd3d-89b9-4ece-b80a-00ab5b0a4122) ### Steps to reproduce the bug Use link as reference document written https://colab.research.google.com/github/huggingface/datasets/blob/main/notebooks/Overview.ipynb#scrollTo=biqDH9vpvSVz ```python # Now let's train our model device = 'cuda' if torch.cuda.is_available() else 'cpu' model.train().to(device) for i, batch in enumerate(dataloader): batch.to(device) outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() model.zero_grad() print(f'Step {i} - loss: {loss:.3}') if i > 5: break ``` Error ```python --------------------------------------------------------------------------- TypeError Traceback (most recent call last) [<ipython-input-44-7040b885f382>](https://localhost:8080/#) in <cell line: 5>() 5 for i, batch in enumerate(dataloader): 6 batch.to(device) ----> 7 outputs = model(**batch) 8 loss = outputs.loss 9 loss.backward() [/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py](https://localhost:8080/#) in _call_impl(self, *args, **kwargs) 1499 or _global_backward_pre_hooks or _global_backward_hooks 1500 or _global_forward_hooks or _global_forward_pre_hooks): -> 1501 return forward_call(*args, **kwargs) 1502 # Do not call functions when jit is used 1503 full_backward_hooks, non_full_backward_hooks = [], [] TypeError: DistilBertForQuestionAnswering.forward() got an unexpected keyword argument 'token_type_ids' ``` https://github.com/huggingface/datasets/assets/1328316/5d8b1d61-9337-4d59-8423-4f37f834c156 ### Expected behavior Run success on Google Colab (free) ### Environment info Windows 11 x64, Google Colab free (my Google Drive just empty about 200 MB, but I don't think it cause problem)
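As the comments above point out, the error comes from pairing a BERT tokenizer with a DistilBERT model. A small sketch of the fix, simply using the same checkpoint for both; note the QA head weights are randomly initialized, which is expected for a model that has not been fine-tuned yet.

```python
from transformers import AutoModelForQuestionAnswering, AutoTokenizer

checkpoint = "distilbert-base-cased"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForQuestionAnswering.from_pretrained(checkpoint)

encodings = tokenizer("Where do we meet?", "We meet in Brooklyn.", return_tensors="pt")
print(encodings.keys())       # no token_type_ids for DistilBERT
outputs = model(**encodings)  # so unpacking the batch no longer raises a TypeError
```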
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5887/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5887/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5886
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5886/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5886/comments
https://api.github.com/repos/huggingface/datasets/issues/5886/events
https://github.com/huggingface/datasets/issues/5886
1,721,070,225
I_kwDODunzps5mlXKR
5,886
Use a work-stealing algorithm for parallel computing
{ "login": "1014661165", "id": 46060451, "node_id": "MDQ6VXNlcjQ2MDYwNDUx", "avatar_url": "https://avatars.githubusercontent.com/u/46060451?v=4", "gravatar_id": "", "url": "https://api.github.com/users/1014661165", "html_url": "https://github.com/1014661165", "followers_url": "https://api.github.com/users/1014661165/followers", "following_url": "https://api.github.com/users/1014661165/following{/other_user}", "gists_url": "https://api.github.com/users/1014661165/gists{/gist_id}", "starred_url": "https://api.github.com/users/1014661165/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/1014661165/subscriptions", "organizations_url": "https://api.github.com/users/1014661165/orgs", "repos_url": "https://api.github.com/users/1014661165/repos", "events_url": "https://api.github.com/users/1014661165/events{/privacy}", "received_events_url": "https://api.github.com/users/1014661165/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[ "Alternatively we could set the number of shards to be a factor than the number of processes (current they're equal) - this way it will be less likely to end up with a shard that is significantly slower than all the other ones." ]
2023-05-23T03:08:44
2023-05-24T15:30:09
null
NONE
null
null
null
### Feature request When I used the Dataset.map API to process data concurrently, I found that it gets slower and slower as it gets closer to completion. I then read the source code of arrow_dataset.py and found that it shards the dataset and uses a multiprocessing pool to execute each shard. This can let the slowest task drag out the entire program's execution time, especially when processing a huge dataset. ### Motivation Use a work-stealing algorithm instead of static sharding for the parallel computation to improve performance. ### Your contribution Just an idea.
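One way to approximate the idea outside of `Dataset.map`, in line with the comment above about using more shards than processes: cut the dataset into many small contiguous shards and feed them to a pool with `chunksize=1`, so fast workers keep pulling new shards instead of waiting for the slowest one. This is only a rough sketch; the imdb dataset, the worker count, and the toy per-example function are stand-ins, not part of the original request.

```python
from multiprocessing import Pool

from datasets import concatenate_datasets, load_dataset

def process_shard(args):
    dataset, num_shards, index = args
    shard = dataset.shard(num_shards, index, contiguous=True)
    return shard.map(lambda x: {"n_chars": len(x["text"])})  # stand-in for the real per-example work

if __name__ == "__main__":
    ds = load_dataset("imdb", split="train")
    num_shards = 64  # many more shards than workers, so idle workers always have something to grab
    tasks = [(ds, num_shards, i) for i in range(num_shards)]
    with Pool(processes=8) as pool:
        shards = pool.map(process_shard, tasks, chunksize=1)  # dynamic, one shard at a time
    ds = concatenate_datasets(shards)
```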
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5886/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5886/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5888
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5888/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5888/comments
https://api.github.com/repos/huggingface/datasets/issues/5888/events
https://github.com/huggingface/datasets/issues/5888
1,722,290,363
I_kwDODunzps5mqBC7
5,888
A way to upload and visualize .mp4 files (millions of them) as part of a dataset
{ "login": "AntreasAntoniou", "id": 10792502, "node_id": "MDQ6VXNlcjEwNzkyNTAy", "avatar_url": "https://avatars.githubusercontent.com/u/10792502?v=4", "gravatar_id": "", "url": "https://api.github.com/users/AntreasAntoniou", "html_url": "https://github.com/AntreasAntoniou", "followers_url": "https://api.github.com/users/AntreasAntoniou/followers", "following_url": "https://api.github.com/users/AntreasAntoniou/following{/other_user}", "gists_url": "https://api.github.com/users/AntreasAntoniou/gists{/gist_id}", "starred_url": "https://api.github.com/users/AntreasAntoniou/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/AntreasAntoniou/subscriptions", "organizations_url": "https://api.github.com/users/AntreasAntoniou/orgs", "repos_url": "https://api.github.com/users/AntreasAntoniou/repos", "events_url": "https://api.github.com/users/AntreasAntoniou/events{/privacy}", "received_events_url": "https://api.github.com/users/AntreasAntoniou/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[ "Hi! \r\n\r\nYou want to use `push_to_hub` (creates Parquet files) instead of `save_to_disk` (creates Arrow files) when creating a Hub dataset. Parquet is designed for long-term storage and takes less space than the Arrow format, and, most importantly, `load_dataset` can parse it, which should fix the viewer. \r\n\r\nRegarding the dataset generation, `Dataset.from_generator` with the video data represented as `datasets.Value(\"binary\")` followed by `push_to_hub` should work (if the `push_to_hub` step times out, restart it to resume uploading)\r\n\r\nPS: Once the dataset is uploaded, to make working with the dataset easier, it's a good idea to add a [transform](https://huggingface.co/docs/datasets/main/en/process#format-transform) to the README that shows how to decode the binary video data into something a model can understand. Also, if you get an `ArrowInvalid` error (can happen when working with large binary data) in `Dataset.from_generator`, reduce the value of `writer_batch_size` (the default is 1000) to fix it.", "One issue here is that Dataset.from_generator can work well for the non 'infinite sampling' version of the dataset. The training set for example is often sampled dynamically given the video files that I have uploaded. I worry that storing the video data as binary means that I'll end up duplicating a lot of the data. Furthermore, storing video data as anything but .mp4 would quickly make the dataset size from 1.9TB to 1PB. ", "> storing video data as anything but .mp4\r\n\r\nWhat I mean by storing as `datasets.Value(\"binary\")` is embedding raw MP4 bytes in the Arrow table, but, indeed, this would waste a lot of space if there are duplicates.\r\n\r\nSo I see two options:\r\n* if one video is not mapped to too many samples, you can embed the video bytes and do \"group by\" on the rest of the columns (this would turn them into lists) to avoid duplicating them (then, it should be easy to define a `map` in the README that samples the video data to \"unpack\" the samples)\r\n* you can create a dataset script that downloads the video files and embeds their file paths into the Arrow file\r\n\r\nAlso, I misread MP4 as MP3. We need to add a `Video` feature to the `datasets` lib to support MP4 files in the viewer (a bit trickier to implement than the `Image` feature due to the Arrow limitations).", "I'm transferring this issue to the `datasets` repo, as it's not related to `huggingface_hub`", "@mariosasko Right. If I want my dataset to be streamable, what are the necessary requirements to achieve that within the context of .mp4 binaries like we have here? I guess your second point here would not support that right?", "The streaming would work, but the video paths would require using `fsspec.open` to get the content.", "Are there any plans to make video playable on the hub?", "Not yet. The (open source) tooling for video is not great in terms of ease of use/performance, so we are discussing internally the best way to support it (one option is creating a new library for video IO, but this will require a lot of work)", "True. I spend a good 4 months just mixing and matching existing solutions so I could get performance that would not IO bound my model training. \r\n\r\nThis is what I ended up with, in case it's useful\r\n\r\nhttps://github.com/AntreasAntoniou/TALI/blob/045cf9e5aa75b1bf2c6d5351fb910fa10e3ff32c/tali/data/data_plus.py#L85" ]
2023-05-22T18:05:26
2023-06-23T03:37:16
null
NONE
null
null
null
**Is your feature request related to a problem? Please describe.** I recently chose to use huggingface hub as the home for a large multi modal dataset I've been building. https://huggingface.co/datasets/Antreas/TALI It combines images, text, audio and video. Now, I could very easily upload a dataset made via datasets.Dataset.from_generator, as long as it did not include video files. I found that including .mp4 files in the entries would not auto-upload those files. Hence I tried to upload them myself. I quickly found out that uploading many small files is a very bad way to use git lfs, and that it would take ages, so, I resorted to using 7z to pack them all up. But then I had a new problem. My dataset had a size of 1.9TB. Trying to upload such a large file with the default huggingface_hub API always resulted in time outs etc. So I decided to split the large files into chunks of 5GB each and reupload. So, eventually it all worked out. But now the dataset can't be properly and natively used by the datasets API because of all the needed preprocessing -- and furthermore the hub is unable to visualize things. **Describe the solution you'd like** A native way to upload large datasets that include .mp4 or other video types. **Describe alternatives you've considered** Already explained earlier **Additional context** https://huggingface.co/datasets/Antreas/TALI
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5888/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5888/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5884
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5884/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5884/comments
https://api.github.com/repos/huggingface/datasets/issues/5884/events
https://github.com/huggingface/datasets/issues/5884
1,719,548,172
I_kwDODunzps5mfjkM
5,884
`Dataset.to_tf_dataset` fails when strings cannot be encoded as `np.bytes_`
{ "login": "alvarobartt", "id": 36760800, "node_id": "MDQ6VXNlcjM2NzYwODAw", "avatar_url": "https://avatars.githubusercontent.com/u/36760800?v=4", "gravatar_id": "", "url": "https://api.github.com/users/alvarobartt", "html_url": "https://github.com/alvarobartt", "followers_url": "https://api.github.com/users/alvarobartt/followers", "following_url": "https://api.github.com/users/alvarobartt/following{/other_user}", "gists_url": "https://api.github.com/users/alvarobartt/gists{/gist_id}", "starred_url": "https://api.github.com/users/alvarobartt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alvarobartt/subscriptions", "organizations_url": "https://api.github.com/users/alvarobartt/orgs", "repos_url": "https://api.github.com/users/alvarobartt/repos", "events_url": "https://api.github.com/users/alvarobartt/events{/privacy}", "received_events_url": "https://api.github.com/users/alvarobartt/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "alvarobartt", "id": 36760800, "node_id": "MDQ6VXNlcjM2NzYwODAw", "avatar_url": "https://avatars.githubusercontent.com/u/36760800?v=4", "gravatar_id": "", "url": "https://api.github.com/users/alvarobartt", "html_url": "https://github.com/alvarobartt", "followers_url": "https://api.github.com/users/alvarobartt/followers", "following_url": "https://api.github.com/users/alvarobartt/following{/other_user}", "gists_url": "https://api.github.com/users/alvarobartt/gists{/gist_id}", "starred_url": "https://api.github.com/users/alvarobartt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alvarobartt/subscriptions", "organizations_url": "https://api.github.com/users/alvarobartt/orgs", "repos_url": "https://api.github.com/users/alvarobartt/repos", "events_url": "https://api.github.com/users/alvarobartt/events{/privacy}", "received_events_url": "https://api.github.com/users/alvarobartt/received_events", "type": "User", "site_admin": false }
[ { "login": "alvarobartt", "id": 36760800, "node_id": "MDQ6VXNlcjM2NzYwODAw", "avatar_url": "https://avatars.githubusercontent.com/u/36760800?v=4", "gravatar_id": "", "url": "https://api.github.com/users/alvarobartt", "html_url": "https://github.com/alvarobartt", "followers_url": "https://api.github.com/users/alvarobartt/followers", "following_url": "https://api.github.com/users/alvarobartt/following{/other_user}", "gists_url": "https://api.github.com/users/alvarobartt/gists{/gist_id}", "starred_url": "https://api.github.com/users/alvarobartt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alvarobartt/subscriptions", "organizations_url": "https://api.github.com/users/alvarobartt/orgs", "repos_url": "https://api.github.com/users/alvarobartt/repos", "events_url": "https://api.github.com/users/alvarobartt/events{/privacy}", "received_events_url": "https://api.github.com/users/alvarobartt/received_events", "type": "User", "site_admin": false } ]
null
[ "May eventually be solved in #5883 ", "#self-assign" ]
2023-05-22T12:03:06
2023-06-09T16:04:56
2023-06-09T16:04:55
CONTRIBUTOR
null
null
null
### Describe the bug When loading any dataset that contains a column with strings that are not ASCII-compatible, looping over those records raises the following exception e.g. for `é` character `UnicodeEncodeError: 'ascii' codec can't encode character '\xe9' in position 0: ordinal not in range(128)`. ### Steps to reproduce the bug Running the following script will eventually fail, when reaching to the batch that contains non-ASCII compatible strings. ```python from datasets import load_dataset ds = load_dataset("imdb", split="train") tfds = ds.to_tf_dataset(batch_size=16) for batch in tfds: print(batch) >>> UnicodeEncodeError: 'ascii' codec can't encode character '\xe9' in position 0: ordinal not in range(128) ``` ### Expected behavior The following script to run properly, making sure that the strings are either `numpy.unicode_` or `numpy.string` instead of `numpy.bytes_` since some characters are not ASCII compatible and that would lead to an issue when applying the `map`. ```python from datasets import load_dataset ds = load_dataset("imdb", split="train") tfds = ds.to_tf_dataset(batch_size=16) for batch in tfds: print(batch) ``` ### Environment info - `datasets` version: 2.12.1.dev0 - Platform: macOS-13.3.1-arm64-arm-64bit - Python version: 3.10.11 - Huggingface_hub version: 0.14.1 - PyArrow version: 11.0.0 - Pandas version: 1.5.3
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5884/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5884/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5881
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5881/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5881/comments
https://api.github.com/repos/huggingface/datasets/issues/5881/events
https://github.com/huggingface/datasets/issues/5881
1,719,402,643
I_kwDODunzps5mfACT
5,881
Split dataset by node: index error when sharding iterable dataset
{ "login": "sanchit-gandhi", "id": 93869735, "node_id": "U_kgDOBZhWpw", "avatar_url": "https://avatars.githubusercontent.com/u/93869735?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sanchit-gandhi", "html_url": "https://github.com/sanchit-gandhi", "followers_url": "https://api.github.com/users/sanchit-gandhi/followers", "following_url": "https://api.github.com/users/sanchit-gandhi/following{/other_user}", "gists_url": "https://api.github.com/users/sanchit-gandhi/gists{/gist_id}", "starred_url": "https://api.github.com/users/sanchit-gandhi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sanchit-gandhi/subscriptions", "organizations_url": "https://api.github.com/users/sanchit-gandhi/orgs", "repos_url": "https://api.github.com/users/sanchit-gandhi/repos", "events_url": "https://api.github.com/users/sanchit-gandhi/events{/privacy}", "received_events_url": "https://api.github.com/users/sanchit-gandhi/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[ "cc @lhoestq in case you have any ideas here! Might need a multi-host set-up to debug (can give you access to a JAX one if you need)", "I am also facing the same problem. Could you let me know if you found a solution for this?", "I couldn't reproduce with the latest version of `datasets` 2.16.1, can you update `datasets` and try again ?" ]
2023-05-22T10:36:13
2024-01-29T14:20:43
null
CONTRIBUTOR
null
null
null
### Describe the bug Context: we're splitting an iterable dataset by node and then passing it to a torch data loader with multiple workers When we iterate over it for 5 steps, we don't get an error When we instead iterate over it for 8 steps, we get an `IndexError` when fetching the data if we have too many workers ### Steps to reproduce the bug Here, we have 2 JAX processes (`jax.process_count() = 2`) which we split the dataset over. The dataset loading script can be found here: https://huggingface.co/datasets/distil-whisper/librispeech_asr/blob/c6a1e805cbfeed5057400ac5937327d7e30281b8/librispeech_asr.py#L310 <details> <summary> Code to reproduce </summary> ```python from datasets import load_dataset import jax from datasets.distributed import split_dataset_by_node from torch.utils.data import DataLoader from tqdm import tqdm # load an example dataset (https://huggingface.co/datasets/distil-whisper/librispeech_asr) dataset = load_dataset("distil-whisper/librispeech_asr", "all", split="train.clean.100", streaming=True) # just keep the text column -> no need to define a collator dataset_text = dataset.remove_columns(set(dataset.features.keys()) - {"text"}) # define some constants batch_size = 256 num_examples = 5 # works for 5 examples, doesn't for 8 num_workers = dataset_text.n_shards # try with multiple workers dataloader = DataLoader(dataset_text, batch_size=batch_size, num_workers=num_workers, drop_last=True) for i, batch in tqdm(enumerate(dataloader), total=num_examples, desc="Multiple workers"): if i == num_examples: break # try splitting by node (we can't do this with `dataset_text` since `split_dataset_by_node` expects the Audio column for an ASR dataset) dataset = split_dataset_by_node(dataset, rank=jax.process_index(), world_size=jax.process_count()) # remove the text column again dataset_text = dataset.remove_columns(set(dataset.features.keys()) - {"text"}) dataloader = DataLoader(dataset_text, batch_size=16, num_workers=num_workers // 2, drop_last=True) for i, batch in tqdm(enumerate(dataloader), total=num_examples, desc="Split by node"): if i == num_examples: break # too many workers dataloader = DataLoader(dataset_text, batch_size=256, num_workers=num_workers, drop_last=True) for i, batch in tqdm(enumerate(dataloader), total=num_examples, desc="Too many workers"): if i == num_examples: break ``` </details> <details> <summary> With 5 examples: </summary> ``` Multiple workers: 100%|███████████████████████████████████████████████████████████████████| 5/5 [00:16<00:00, 3.33s/it] Assigning 7 shards (or data sources) of the dataset to each node. Split by node: 100%|██████████████████████████████████████████████████████████████████████| 5/5 [00:13<00:00, 2.76s/it] Assigning 7 shards (or data sources) of the dataset to each node. Too many dataloader workers: 14 (max is dataset.n_shards=7). Stopping 7 dataloader workers. To parallelize data loading, we give each process some shards (or data sources) to process. Therefore it's unnecessary t o have a number of workers greater than dataset.n_shards=7. To enable more parallelism, please split the dataset in more files than 7. Too many workers: 100%|███████████████████████████████████████████████████████████████████| 5/5 [00:15<00:00, 3.03s/it] ``` </details> <details> <summary> With 7 examples: </summary> ``` Multiple workers: 100%|███████████████████████████████████████████████████████████████████| 8/8 [00:13<00:00, 1.71s/it] Assigning 7 shards (or data sources) of the dataset to each node. 
Split by node: 100%|██████████████████████████████████████████████████████████████████████| 8/8 [00:11<00:00, 1.38s/it] Assigning 7 shards (or data sources) of the dataset to each node. Too many dataloader workers: 14 (max is dataset.n_shards=7). Stopping 7 dataloader workers. To parallelize data loading, we give each process some shards (or data sources) to process. Therefore it's unnecessary to have a number of workers greater than dataset.n_shards=7. To enable more parallelism, please split the dataset in more files than 7. Too many workers: 88%|██████████████████████████████████████████████████████████▋ | 7/8 [00:13<00:01, 1.89s/it] Traceback (most recent call last): File "distil-whisper/test_librispeech.py", line 36, in <module> for i, batch in tqdm(enumerate(dataloader), total=num_examples, desc="Too many workers"): File "/home/sanchitgandhi/hf/lib/python3.8/site-packages/tqdm/std.py", line 1178, in __iter__ for obj in iterable: File "/home/sanchitgandhi/hf/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 633, in __next__ data = self._next_data() File "/home/sanchitgandhi/hf/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 1325, in _next_data return self._process_data(data) File "/home/sanchitgandhi/hf/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 1371, in _process_data data.reraise() File "/home/sanchitgandhi/hf/lib/python3.8/site-packages/torch/_utils.py", line 644, in reraise raise exception IndexError: Caught IndexError in DataLoader worker process 7. Original Traceback (most recent call last): File "/home/sanchitgandhi/hf/lib/python3.8/site-packages/torch/utils/data/_utils/worker.py", line 308, in _worker_loop data = fetcher.fetch(index) File "/home/sanchitgandhi/hf/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py", line 32, in fetch data.append(next(self.dataset_iter)) File "/home/sanchitgandhi/datasets/src/datasets/iterable_dataset.py", line 986, in __iter__ yield from self._iter_pytorch(ex_iterable) File "/home/sanchitgandhi/datasets/src/datasets/iterable_dataset.py", line 920, in _iter_pytorch for key, example in ex_iterable.shard_data_sources(worker_info.id, worker_info.num_workers): File "/home/sanchitgandhi/datasets/src/datasets/iterable_dataset.py", line 540, in shard_data_sources self.ex_iterable.shard_data_sources(worker_id, num_workers), File "/home/sanchitgandhi/datasets/src/datasets/iterable_dataset.py", line 796, in shard_data_sources self.ex_iterable.shard_data_sources(worker_id, num_workers), File "/home/sanchitgandhi/datasets/src/datasets/iterable_dataset.py", line 126, in shard_data_sources requested_gen_kwargs = _merge_gen_kwargs([gen_kwargs_list[i] for i in shard_indices]) File "/home/sanchitgandhi/datasets/src/datasets/utils/sharding.py", line 76, in _merge_gen_kwargs for key in gen_kwargs_list[0] IndexError: list index out of range ``` </details> ### Expected behavior Should pass for both 5 and 7 examples ### Environment info - `datasets` version: 2.12.1.dev0 - Platform: Linux-5.13.0-1023-gcp-x86_64-with-glibc2.29 - Python version: 3.8.10 - Huggingface_hub version: 0.14.1 - PyArrow version: 12.0.0 - Pandas version: 2.0.1
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5881/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5881/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5880
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5880/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5880/comments
https://api.github.com/repos/huggingface/datasets/issues/5880/events
https://github.com/huggingface/datasets/issues/5880
1,719,090,101
I_kwDODunzps5mdzu1
5,880
load_dataset from s3 file system through streaming cannot iterate data
{ "login": "janineguo", "id": 59083384, "node_id": "MDQ6VXNlcjU5MDgzMzg0", "avatar_url": "https://avatars.githubusercontent.com/u/59083384?v=4", "gravatar_id": "", "url": "https://api.github.com/users/janineguo", "html_url": "https://github.com/janineguo", "followers_url": "https://api.github.com/users/janineguo/followers", "following_url": "https://api.github.com/users/janineguo/following{/other_user}", "gists_url": "https://api.github.com/users/janineguo/gists{/gist_id}", "starred_url": "https://api.github.com/users/janineguo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/janineguo/subscriptions", "organizations_url": "https://api.github.com/users/janineguo/orgs", "repos_url": "https://api.github.com/users/janineguo/repos", "events_url": "https://api.github.com/users/janineguo/events{/privacy}", "received_events_url": "https://api.github.com/users/janineguo/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[ "This sounds related to #5281.\r\n\r\nCan you try passing `storage_options=s3_client.storage_options` instead passing it to `use_auth_token=` ?", "I tried `storage_options` before, but it doesn't work, I checked our source code and I found that we even didn't pass this parameter to the following process. if I use `storage_options` instead of `use_auth_token`, then I also need to change another place of the code. the last line of `streaming_download_manager.py`. our code only passes the `use_auth_token` to the following handler, but does nothing to the `storage_options`\r\n<img width=\"1050\" alt=\"image\" src=\"https://github.com/huggingface/datasets/assets/59083384/5be90933-3331-4ecf-9e11-34f9852d8f92\">\r\n", "Cloud storage support is still experimental indeed and you can expect some bugs.\r\n\r\nI think we need to pass the storage options anywhere use_auth_token is passed in indeed. Let me know if you'd be interested in contributing a fix !", "Oh, that's great, I really like to fix it. because datasets is really useful and most of our projects need to use it, but we can store our data on the internet due to security reasons. fix it not only make our own work more efficient but also can benefit others who use it." ]
2023-05-22T07:40:27
2023-05-26T12:52:08
null
CONTRIBUTOR
null
null
null
### Describe the bug I have a JSON file in my s3 file system(minio), I can use load_dataset to get the file link, but I can't iterate it <img width="816" alt="image" src="https://github.com/huggingface/datasets/assets/59083384/cc0778d3-36f3-45b5-ac68-4e7c664c2ed0"> <img width="1144" alt="image" src="https://github.com/huggingface/datasets/assets/59083384/76872af3-8b3c-42ff-9f55-528c920a7af1"> we can change 4 lines to fix this bug, you can check whether it is ok for us. <img width="941" alt="image" src="https://github.com/huggingface/datasets/assets/59083384/5a22155a-ece7-496c-8506-047e5c235cd3"> ### Steps to reproduce the bug 1. storage a file in you s3 file system 2. use load_dataset to read it through streaming 3. iterate it ### Expected behavior can iterate it successfully ### Environment info - `datasets` version: 2.12.0 - Platform: macOS-10.16-x86_64-i386-64bit - Python version: 3.8.16 - Huggingface_hub version: 0.14.1 - PyArrow version: 12.0.0 - Pandas version: 2.0.1
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5880/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/huggingface/datasets/issues/5880/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5878
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5878/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5878/comments
https://api.github.com/repos/huggingface/datasets/issues/5878/events
https://github.com/huggingface/datasets/issues/5878
1,718,203,843
I_kwDODunzps5mabXD
5,878
Prefetching for IterableDataset
{ "login": "vyeevani", "id": 30946190, "node_id": "MDQ6VXNlcjMwOTQ2MTkw", "avatar_url": "https://avatars.githubusercontent.com/u/30946190?v=4", "gravatar_id": "", "url": "https://api.github.com/users/vyeevani", "html_url": "https://github.com/vyeevani", "followers_url": "https://api.github.com/users/vyeevani/followers", "following_url": "https://api.github.com/users/vyeevani/following{/other_user}", "gists_url": "https://api.github.com/users/vyeevani/gists{/gist_id}", "starred_url": "https://api.github.com/users/vyeevani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/vyeevani/subscriptions", "organizations_url": "https://api.github.com/users/vyeevani/orgs", "repos_url": "https://api.github.com/users/vyeevani/repos", "events_url": "https://api.github.com/users/vyeevani/events{/privacy}", "received_events_url": "https://api.github.com/users/vyeevani/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[ "Very cool! Do you have a link to the code that you're using to eagerly fetch the data? Would also be interested in hacking around something here for pre-fetching iterable datasets", "I ended up just switching back to the pytorch dataloader and using it's multiprocessing functionality to handle this :(. I'm just not that familiar with python multiprocessing to get something to work in jupyter (kept having weird behaviors happening with zombies living after the cell finished).", "Ultimately settled on using webdataset to circumvent huggingface datasets entirely. Would definitely switch back if: https://github.com/huggingface/datasets/issues/5337 was resolved.", "Hi! You can combine `datasets` with `torchdata` to prefetch `IterableDataset`'s samples:\r\n```python\r\nfrom datasets import load_dataset\r\nfrom torchdata.datapipes.iter import IterableWrapper, HuggingFaceHubReader\r\nfrom torch.utils.data import DataLoader\r\n\r\nds = load_dataset(\"sst\", split=\"train\", streaming=True)\r\n# processing...\r\ndp = IterableWrapper(ds)\r\ndp = dp.prefetch(100)\r\ndl = DataLoader(dp, batch_size=8)\r\n\r\ni = iter(dl)\r\nnext(i)\r\n```", "Hey @mariosasko! Thanks for the tip here - introducing prefetch with `torchdata` didn't really give me any performance difference vs not prefetching, but the concept is definitely one that could be really beneficial. Are there any benchmarks that show the speed-up you can get with `torchdata`'s prefetch just for comparison?" ]
2023-05-20T15:25:40
2023-06-01T17:40:00
null
NONE
null
null
null
### Feature request Add support for prefetching the next n batches through iterabledataset to reduce batch loading bottleneck in training loop. ### Motivation The primary motivation behind this is to use hardware accelerators alongside a streaming dataset. This is required when you are in a low ram or low disk space setting as well as quick iteration where you're iterating though different accelerator environments (e.x changing ec2 instances quickly to figure out batch/sec for a particular architecture). Currently, using the IterableDataset results in accelerators becoming basically useless due to the massive bottleneck induced by the dataset lazy loading/transform/mapping. I've considered two alternatives: PyTorch dataloader that handles this. However, I'm using jax, and I believe this is a piece of functionality that should live in the stream class. Replicating the "num_workers" part of the PyTorch DataLoader to eagerly load batches and apply the transform so Arrow caching will automatically cache results and make them accessible. ### Your contribution I may or may not have time to do this. Currently, I've written the basic multiprocessor approach to handle the eager DataLoader for my own use case with code that's not integrated to datasets. I'd definitely see this as being the default over the regular Dataset for most people given that they wouldn't have to wait on the datasets while also not worrying about performance.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5878/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5878/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5877
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5877/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5877/comments
https://api.github.com/repos/huggingface/datasets/issues/5877/events
https://github.com/huggingface/datasets/issues/5877
1,717,983,961
I_kwDODunzps5mZlrZ
5,877
Request for text deduplication feature
{ "login": "SupreethRao99", "id": 55043035, "node_id": "MDQ6VXNlcjU1MDQzMDM1", "avatar_url": "https://avatars.githubusercontent.com/u/55043035?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SupreethRao99", "html_url": "https://github.com/SupreethRao99", "followers_url": "https://api.github.com/users/SupreethRao99/followers", "following_url": "https://api.github.com/users/SupreethRao99/following{/other_user}", "gists_url": "https://api.github.com/users/SupreethRao99/gists{/gist_id}", "starred_url": "https://api.github.com/users/SupreethRao99/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SupreethRao99/subscriptions", "organizations_url": "https://api.github.com/users/SupreethRao99/orgs", "repos_url": "https://api.github.com/users/SupreethRao99/repos", "events_url": "https://api.github.com/users/SupreethRao99/events{/privacy}", "received_events_url": "https://api.github.com/users/SupreethRao99/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[ "The \"exact match\" deduplication will be possible when we resolve https://github.com/huggingface/datasets/issues/2514 (first, https://github.com/apache/arrow/issues/30950 needs to be addressed on the Arrow side). In the meantime, you can use Polars or DuckDB (e.g., via [datasets-sql](https://github.com/mariosasko/datasets_sql)).\r\n\r\nFuzzy deduplication is out-of-scope for now ([splink](https://github.com/moj-analytical-services/splink) is probably the best tool for it).", "This library can be an intermediate solution : https://github.com/ChenghaoMou/text-dedup/tree/main", "I have been using polars to remove duplicates but it would be nice to do it directly in pyarrow.\r\n\r\nFor example,\r\n\r\n1. Read dataset with pyarrow\r\n2. Use scan_pyarrow_dataset() with Polars to create a LazyFrame\r\n3. Use sort and unique to remove duplicates based on a subset of columns\r\n4. Convert to table and save data with ds.write_dataset()\r\n\r\nThere are times where that workflow makes perfect sense because I do additional transformations with Polars. Most of the time I am simply just reading dataset A and writing dataset B without duplicates though, and I wish I could use a pyarrow scanner or table directly. ", "Hi\r\nsee this new release from hf [datatrove](https://github.com/huggingface/datatrove)\r\nDataTrove is a library to process, filter and deduplicate text data at a very large scale. It provides a set of prebuilt commonly used processing blocks with a framework to easily add custom functionality" ]
2023-05-20T01:56:00
2024-01-25T14:40:09
null
NONE
null
null
null
### Feature request It would be great if there were support for high-performance, highly scalable text deduplication algorithms as part of the datasets library. ### Motivation Motivated by this blog post https://huggingface.co/blog/dedup and this library https://github.com/google-research/deduplicate-text-datasets, but slightly frustrated by how difficult these tools are to work with, I am proposing this feature. ### Your contribution I would be happy to contribute to the development effort of this feature and would love to collaborate with others on it.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5877/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5877/timeline
null
null
https://api.github.com/repos/huggingface/datasets/issues/5876
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5876/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5876/comments
https://api.github.com/repos/huggingface/datasets/issues/5876/events
https://github.com/huggingface/datasets/issues/5876
1,717,978,985
I_kwDODunzps5mZkdp
5,876
Incompatibility with DataLab
{ "login": "helpmefindaname", "id": 26192135, "node_id": "MDQ6VXNlcjI2MTkyMTM1", "avatar_url": "https://avatars.githubusercontent.com/u/26192135?v=4", "gravatar_id": "", "url": "https://api.github.com/users/helpmefindaname", "html_url": "https://github.com/helpmefindaname", "followers_url": "https://api.github.com/users/helpmefindaname/followers", "following_url": "https://api.github.com/users/helpmefindaname/following{/other_user}", "gists_url": "https://api.github.com/users/helpmefindaname/gists{/gist_id}", "starred_url": "https://api.github.com/users/helpmefindaname/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/helpmefindaname/subscriptions", "organizations_url": "https://api.github.com/users/helpmefindaname/orgs", "repos_url": "https://api.github.com/users/helpmefindaname/repos", "events_url": "https://api.github.com/users/helpmefindaname/events{/privacy}", "received_events_url": "https://api.github.com/users/helpmefindaname/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892877, "node_id": "MDU6TGFiZWwxOTM1ODkyODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue", "name": "good first issue", "color": "7057ff", "default": true, "description": "Good for newcomers" } ]
closed
false
null
[]
null
[ "Indeed, `clobber=True` (with a warning if the existing protocol will be overwritten) should fix the issue, but maybe a better solution is to register our compression filesystem before the script is executed and unregister them afterward. WDYT @lhoestq @albertvillanova?", "I think we should use clobber and show a warning if it overwrote a registered filesystem indeed ! This way the user can re-register the filesystems if needed. Though they should probably be compatible (and maybe do the exact same thing) so I wouldn't de-register the `datasets` filesystems" ]
2023-05-20T01:39:11
2023-05-25T06:42:34
2023-05-25T06:42:34
NONE
null
null
null
### Describe the bug Hello, I am currently working on a project where both [DataLab](https://github.com/ExpressAI/DataLab) and [datasets](https://github.com/huggingface/datasets) are subdependencies. I noticed that I cannot import both libraries, as they both register FileSystems in `fsspec`, expecting the FileSystems not being registered before. When running the code below, I get the following error: ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "C:\Users\Bened\anaconda3\envs\ner-eval-dashboard2\lib\site-packages\datalabs\__init__.py", line 28, in <module> from datalabs.arrow_dataset import concatenate_datasets, Dataset File "C:\Users\Bened\anaconda3\envs\ner-eval-dashboard2\lib\site-packages\datalabs\arrow_dataset.py", line 60, in <module> from datalabs.arrow_writer import ArrowWriter, OptimizedTypedSequence File "C:\Users\Bened\anaconda3\envs\ner-eval-dashboard2\lib\site-packages\datalabs\arrow_writer.py", line 28, in <module> from datalabs.features import ( File "C:\Users\Bened\anaconda3\envs\ner-eval-dashboard2\lib\site-packages\datalabs\features\__init__.py", line 2, in <module> from datalabs.features.audio import Audio File "C:\Users\Bened\anaconda3\envs\ner-eval-dashboard2\lib\site-packages\datalabs\features\audio.py", line 21, in <module> from datalabs.utils.streaming_download_manager import xopen File "C:\Users\Bened\anaconda3\envs\ner-eval-dashboard2\lib\site-packages\datalabs\utils\streaming_download_manager.py", line 16, in <module> from datalabs.filesystems import COMPRESSION_FILESYSTEMS File "C:\Users\Bened\anaconda3\envs\ner-eval-dashboard2\lib\site-packages\datalabs\filesystems\__init__.py", line 37, in <module> fsspec.register_implementation(fs_class.protocol, fs_class) File "C:\Users\Bened\anaconda3\envs\ner-eval-dashboard2\lib\site-packages\fsspec\registry.py", line 51, in register_implementation raise ValueError( ValueError: Name (bz2) already in the registry and clobber is False ``` I think as simple solution would be to just set `clobber=True` in https://github.com/huggingface/datasets/blob/main/src/datasets/filesystems/__init__.py#L28. This allows the register to discard previous registrations. This should work, as the datalabs FileSystems are copies of the datasets FileSystems. However, I don't know if it is guaranteed to be compatible with other libraries that might use the same protocols. I am linking the symmetric issue on [DataLab](https://github.com/ExpressAI/DataLab/issues/425) as ideally the issue is solved in both libraries the same way. Otherwise, it could lead to different behaviors depending on which library gets imported first. ### Steps to reproduce the bug 1. Run `pip install datalabs==0.4.15 datasets==2.12.0` 2. Run the following python code: ``` import datalabs import datasets ``` ### Expected behavior It should be possible to import both libraries without getting a Value Error ### Environment info datalabs==0.4.15 datasets==2.12.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5876/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5876/timeline
null
completed
https://api.github.com/repos/huggingface/datasets/issues/5875
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5875/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5875/comments
https://api.github.com/repos/huggingface/datasets/issues/5875/events
https://github.com/huggingface/datasets/issues/5875
1,716,770,394
I_kwDODunzps5mU9Za
5,875
Why split slicing doesn't behave like list slicing?
{ "login": "astariul", "id": 43774355, "node_id": "MDQ6VXNlcjQzNzc0MzU1", "avatar_url": "https://avatars.githubusercontent.com/u/43774355?v=4", "gravatar_id": "", "url": "https://api.github.com/users/astariul", "html_url": "https://github.com/astariul", "followers_url": "https://api.github.com/users/astariul/followers", "following_url": "https://api.github.com/users/astariul/following{/other_user}", "gists_url": "https://api.github.com/users/astariul/gists{/gist_id}", "starred_url": "https://api.github.com/users/astariul/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/astariul/subscriptions", "organizations_url": "https://api.github.com/users/astariul/orgs", "repos_url": "https://api.github.com/users/astariul/repos", "events_url": "https://api.github.com/users/astariul/events{/privacy}", "received_events_url": "https://api.github.com/users/astariul/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892865, "node_id": "MDU6TGFiZWwxOTM1ODkyODY1", "url": "https://api.github.com/repos/huggingface/datasets/labels/duplicate", "name": "duplicate", "color": "cfd3d7", "default": true, "description": "This issue or pull request already exists" } ]
closed
false
null
[]
null
[ "A duplicate of https://github.com/huggingface/datasets/issues/1774" ]
2023-05-19T07:21:10
2024-01-31T15:54:18
2024-01-31T15:54:18
NONE
null
null
null
### Describe the bug If I want to get the first 10 samples of my dataset, I can do : ``` ds = datasets.load_dataset('mnist', split='train[:10]') ``` But if I exceed the number of samples in the dataset, an exception is raised : ``` ds = datasets.load_dataset('mnist', split='train[:999999999]') ``` > ValueError: Requested slice [:999999999] incompatible with 60000 examples. ### Steps to reproduce the bug ``` ds = datasets.load_dataset('mnist', split='train[:999999999]') ``` ### Expected behavior I would expect it to behave like python lists (no exception raised, the whole list is kept) : ``` d = list(range(1000))[:999999] print(len(d)) # > 1000 ``` ### Environment info - `datasets` version: 2.9.0 - Platform: macOS-12.6-arm64-arm-64bit - Python version: 3.9.12 - PyArrow version: 11.0.0 - Pandas version: 1.5.3
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5875/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5875/timeline
null
completed