Dataset schema (one row per column: name, dtype, and observed statistics):

| Column | Dtype | Statistics |
| --- | --- | --- |
| `url` | string | lengths 61-61 |
| `repository_url` | string | 1 value |
| `labels_url` | string | lengths 75-75 |
| `comments_url` | string | lengths 70-70 |
| `events_url` | string | lengths 68-68 |
| `html_url` | string | lengths 51-51 |
| `id` | int64 | 1.29B-1.57B |
| `node_id` | string | lengths 18-18 |
| `number` | int64 | 4.59k-5.51k |
| `title` | string | lengths 10-165 |
| `user` | dict | |
| `labels` | list | |
| `state` | string | 2 values |
| `locked` | bool | 1 class |
| `assignee` | dict | |
| `assignees` | list | |
| `milestone` | null | |
| `comments` | int64 | 0-48 |
| `created_at` | unknown | |
| `updated_at` | unknown | |
| `closed_at` | unknown | |
| `author_association` | string | 3 values |
| `active_lock_reason` | null | |
| `body` | string | lengths 51-33.9k |
| `reactions` | dict | |
| `timeline_url` | string | lengths 70-70 |
| `performed_via_github_app` | null | |
| `state_reason` | string | 3 values |
| `draft` | bool | 0 classes |
| `pull_request` | dict | |
| `is_pull_request` | bool | 1 class |
https://api.github.com/repos/huggingface/datasets/issues/4766
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4766/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4766/comments
https://api.github.com/repos/huggingface/datasets/issues/4766/events
https://github.com/huggingface/datasets/issues/4766
1,321,809,380
I_kwDODunzps5OyTXk
4,766
Dataset Viewer issue for openclimatefix/goes-mrms
{ "login": "cheaterHy", "id": 101324688, "node_id": "U_kgDOBgoXkA", "avatar_url": "https://avatars.githubusercontent.com/u/101324688?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cheaterHy", "html_url": "https://github.com/cheaterHy", "followers_url": "https://api.github.com/users/cheaterHy/followers", "following_url": "https://api.github.com/users/cheaterHy/following{/other_user}", "gists_url": "https://api.github.com/users/cheaterHy/gists{/gist_id}", "starred_url": "https://api.github.com/users/cheaterHy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cheaterHy/subscriptions", "organizations_url": "https://api.github.com/users/cheaterHy/orgs", "repos_url": "https://api.github.com/users/cheaterHy/repos", "events_url": "https://api.github.com/users/cheaterHy/events{/privacy}", "received_events_url": "https://api.github.com/users/cheaterHy/received_events", "type": "User", "site_admin": false }
[]
open
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
1
"2022-07-29T06:17:14"
"2022-07-29T08:43:58"
null
NONE
null
### Link

_No response_

### Description

_No response_

### Owner

_No response_
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4766/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4766/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4761
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4761/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4761/comments
https://api.github.com/repos/huggingface/datasets/issues/4761/events
https://github.com/huggingface/datasets/issues/4761
1,321,068,411
I_kwDODunzps5Oved7
4,761
parallel searching in multi-gpu setting using faiss
{ "login": "xwwwwww", "id": 48146603, "node_id": "MDQ6VXNlcjQ4MTQ2NjAz", "avatar_url": "https://avatars.githubusercontent.com/u/48146603?v=4", "gravatar_id": "", "url": "https://api.github.com/users/xwwwwww", "html_url": "https://github.com/xwwwwww", "followers_url": "https://api.github.com/users/xwwwwww/followers", "following_url": "https://api.github.com/users/xwwwwww/following{/other_user}", "gists_url": "https://api.github.com/users/xwwwwww/gists{/gist_id}", "starred_url": "https://api.github.com/users/xwwwwww/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/xwwwwww/subscriptions", "organizations_url": "https://api.github.com/users/xwwwwww/orgs", "repos_url": "https://api.github.com/users/xwwwwww/repos", "events_url": "https://api.github.com/users/xwwwwww/events{/privacy}", "received_events_url": "https://api.github.com/users/xwwwwww/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
25
"2022-07-28T14:57:03"
"2022-08-27T02:08:49"
null
CONTRIBUTOR
null
While I notice that `add_faiss_index` supports assigning multiple GPUs, I am still confused about how it works. Does the `search_batch` function automatically parallelize the input queries across the different GPUs? (A sketch of the usage in question follows this record.) https://github.com/huggingface/datasets/blob/d76599bdd4d186b2e7c4f468b05766016055a0a5/src/datasets/search.py#L360
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4761/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4761/timeline
null
null
null
null
false
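The question in issue 4761 above concerns multi-GPU faiss search. Below is a minimal sketch of the calling convention being discussed, assuming `datasets` 2.4 with `faiss-gpu` installed; the column name and array sizes are illustrative, and whether faiss shards a query batch across the listed GPUs internally is exactly the open question, so this only shows the API surface:

```python
# Sketch of the API discussed in issue 4761; assumes faiss-gpu is installed.
import numpy as np
from datasets import Dataset

ds = Dataset.from_dict(
    {"embeddings": np.random.rand(1000, 128).astype(np.float32).tolist()}
)

# `device` accepts a list of GPU ids; faiss then places the index on those GPUs.
ds.add_faiss_index(column="embeddings", device=[0, 1])

queries = np.random.rand(32, 128).astype(np.float32)
# search_batch takes a 2D array of queries; how the batch is distributed across
# the GPUs happens inside faiss, which is what the issue asks about.
results = ds.search_batch("embeddings", queries, k=5)
print(results.total_indices[:2])
```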
https://api.github.com/repos/huggingface/datasets/issues/4760
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4760/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4760/comments
https://api.github.com/repos/huggingface/datasets/issues/4760/events
https://github.com/huggingface/datasets/issues/4760
1,320,878,223
I_kwDODunzps5OuwCP
4,760
Issue with offline mode
{ "login": "SaulLu", "id": 55560583, "node_id": "MDQ6VXNlcjU1NTYwNTgz", "avatar_url": "https://avatars.githubusercontent.com/u/55560583?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SaulLu", "html_url": "https://github.com/SaulLu", "followers_url": "https://api.github.com/users/SaulLu/followers", "following_url": "https://api.github.com/users/SaulLu/following{/other_user}", "gists_url": "https://api.github.com/users/SaulLu/gists{/gist_id}", "starred_url": "https://api.github.com/users/SaulLu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SaulLu/subscriptions", "organizations_url": "https://api.github.com/users/SaulLu/orgs", "repos_url": "https://api.github.com/users/SaulLu/repos", "events_url": "https://api.github.com/users/SaulLu/events{/privacy}", "received_events_url": "https://api.github.com/users/SaulLu/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
4
"2022-07-28T12:45:14"
"2022-07-28T16:05:36"
null
NONE
null
## Describe the bug

I can't retrieve a cached dataset with offline mode enabled

## Steps to reproduce the bug

To reproduce my issue, first, you'll need to run a script that will cache the dataset

```python
import os
os.environ["HF_DATASETS_OFFLINE"] = "0"

import datasets

datasets.logging.set_verbosity_info()

ds_name = "SaulLu/toy_struc_dataset"
ds = datasets.load_dataset(ds_name)
print(ds)
```

then, you can try to reload it in offline mode:

```python
import os
os.environ["HF_DATASETS_OFFLINE"] = "1"

import datasets

datasets.logging.set_verbosity_info()

ds_name = "SaulLu/toy_struc_dataset"
ds = datasets.load_dataset(ds_name)
print(ds)
```

## Expected results

I would have expected the 2nd snippet not to return any errors

## Actual results

The 2nd snippet returns:

```
Traceback (most recent call last):
  File "/home/lucile_huggingface_co/sandbox/evaluate/test_cache_datasets.py", line 8, in <module>
    ds = datasets.load_dataset(ds_name)
  File "/home/lucile_huggingface_co/anaconda3/envs/evaluate-dev/lib/python3.8/site-packages/datasets/load.py", line 1723, in load_dataset
    builder_instance = load_dataset_builder(
  File "/home/lucile_huggingface_co/anaconda3/envs/evaluate-dev/lib/python3.8/site-packages/datasets/load.py", line 1500, in load_dataset_builder
    dataset_module = dataset_module_factory(
  File "/home/lucile_huggingface_co/anaconda3/envs/evaluate-dev/lib/python3.8/site-packages/datasets/load.py", line 1241, in dataset_module_factory
    raise ConnectionError(f"Couln't reach the Hugging Face Hub for dataset '{path}': {e1}") from None
ConnectionError: Couln't reach the Hugging Face Hub for dataset 'SaulLu/toy_struc_dataset': Offline mode is enabled.
```

## Environment info

- `datasets` version: 2.4.0
- Platform: Linux-4.19.0-21-cloud-amd64-x86_64-with-glibc2.17
- Python version: 3.8.13
- PyArrow version: 8.0.0
- Pandas version: 1.4.3

Maybe I'm misunderstanding something in the use of the offline mode (see [doc](https://huggingface.co/docs/datasets/v2.4.0/en/loading#offline)), is that the case?
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4760/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4760/timeline
null
null
null
null
false
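For the offline-mode report in issue 4760 above, a commonly suggested workaround (a sketch of a stopgap, not the resolution of the bug) is to persist the dataset explicitly, since `load_from_disk` reads local Arrow files and never contacts the Hub:

```python
import datasets

# With network access: download once and persist the Arrow data locally.
ds = datasets.load_dataset("SaulLu/toy_struc_dataset")
ds.save_to_disk("./toy_struc_dataset")

# Later, fully offline: load_from_disk never touches the Hub.
ds = datasets.load_from_disk("./toy_struc_dataset")
print(ds)
```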
https://api.github.com/repos/huggingface/datasets/issues/4759
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4759/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4759/comments
https://api.github.com/repos/huggingface/datasets/issues/4759/events
https://github.com/huggingface/datasets/issues/4759
1,320,783,300
I_kwDODunzps5OuY3E
4,759
Dataset Viewer issue for Toygar/turkish-offensive-language-detection
{ "login": "toygarr", "id": 44132720, "node_id": "MDQ6VXNlcjQ0MTMyNzIw", "avatar_url": "https://avatars.githubusercontent.com/u/44132720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/toygarr", "html_url": "https://github.com/toygarr", "followers_url": "https://api.github.com/users/toygarr/followers", "following_url": "https://api.github.com/users/toygarr/following{/other_user}", "gists_url": "https://api.github.com/users/toygarr/gists{/gist_id}", "starred_url": "https://api.github.com/users/toygarr/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/toygarr/subscriptions", "organizations_url": "https://api.github.com/users/toygarr/orgs", "repos_url": "https://api.github.com/users/toygarr/repos", "events_url": "https://api.github.com/users/toygarr/events{/privacy}", "received_events_url": "https://api.github.com/users/toygarr/received_events", "type": "User", "site_admin": false }
[ { "id": 3470211881, "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer", "name": "dataset-viewer", "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co" } ]
closed
false
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[ { "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false } ]
null
1
"2022-07-28T11:21:43"
"2022-07-28T13:17:56"
"2022-07-28T13:17:48"
NONE
null
### Link

https://huggingface.co/datasets/Toygar/turkish-offensive-language-detection

### Description

```
Status code:   400
Exception:     Status400Error
Message:       The dataset does not exist.
```

Hi, I provided train.csv, test.csv and valid.csv files. However, the viewer says the dataset does not exist. Do I need to do anything else?

### Owner

Yes
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4759/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4759/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4757
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4757/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4757/comments
https://api.github.com/repos/huggingface/datasets/issues/4757/events
https://github.com/huggingface/datasets/issues/4757
1,320,602,532
I_kwDODunzps5Otsuk
4,757
Document better when relative paths are transformed to URLs
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892861, "node_id": "MDU6TGFiZWwxOTM1ODkyODYx", "url": "https://api.github.com/repos/huggingface/datasets/labels/documentation", "name": "documentation", "color": "0075ca", "default": true, "description": "Improvements or additions to documentation" } ]
closed
false
{ "login": "stevhliu", "id": 59462357, "node_id": "MDQ6VXNlcjU5NDYyMzU3", "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stevhliu", "html_url": "https://github.com/stevhliu", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "repos_url": "https://api.github.com/users/stevhliu/repos", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "type": "User", "site_admin": false }
[ { "login": "stevhliu", "id": 59462357, "node_id": "MDQ6VXNlcjU5NDYyMzU3", "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stevhliu", "html_url": "https://github.com/stevhliu", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "repos_url": "https://api.github.com/users/stevhliu/repos", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "type": "User", "site_admin": false } ]
null
0
"2022-07-28T08:46:27"
"2022-08-25T18:34:24"
"2022-08-25T18:34:24"
MEMBER
null
As discussed with @ydshieh, when passing a relative path as `data_dir` to `load_dataset` for a dataset hosted on the Hub, the relative path is transformed to the corresponding URL of the Hub dataset.

Currently, we mention this in our docs here: [Create a dataset loading script > Download data files and organize splits](https://huggingface.co/docs/datasets/v2.4.0/en/dataset_script#download-data-files-and-organize-splits)

> If the data files live in the same folder or repository of the dataset script, you can just pass the relative paths to the files instead of URLs.

Maybe we should document better how relative paths are handled, not only when creating a dataset loading script, but also when passed to `load_dataset` as:
- `data_dir`
- `data_files`

CC: @stevhliu
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4757/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4757/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4755
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4755/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4755/comments
https://api.github.com/repos/huggingface/datasets/issues/4755/events
https://github.com/huggingface/datasets/issues/4755
1,319,687,044
I_kwDODunzps5OqNOE
4,755
Datasets.map causes incorrect overflow_to_sample_mapping when used with tokenizers and small batch size
{ "login": "srobertjames", "id": 662612, "node_id": "MDQ6VXNlcjY2MjYxMg==", "avatar_url": "https://avatars.githubusercontent.com/u/662612?v=4", "gravatar_id": "", "url": "https://api.github.com/users/srobertjames", "html_url": "https://github.com/srobertjames", "followers_url": "https://api.github.com/users/srobertjames/followers", "following_url": "https://api.github.com/users/srobertjames/following{/other_user}", "gists_url": "https://api.github.com/users/srobertjames/gists{/gist_id}", "starred_url": "https://api.github.com/users/srobertjames/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/srobertjames/subscriptions", "organizations_url": "https://api.github.com/users/srobertjames/orgs", "repos_url": "https://api.github.com/users/srobertjames/repos", "events_url": "https://api.github.com/users/srobertjames/events{/privacy}", "received_events_url": "https://api.github.com/users/srobertjames/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
2
"2022-07-27T14:54:11"
"2022-07-27T17:57:28"
null
NONE
null
## Describe the bug

When using a `tokenizer`, we can retrieve the field `overflow_to_sample_mapping`, since long samples will be overflown into multiple token sequences.

However, when tokenizing is done via `Dataset.map` with `num_proc > 1`, the `overflow_to_sample_mapping` field is wrong. This seems to be because each tokenizer only looks at its share of the samples, and maps to the index _within its share_, but then `Dataset.map` collates them together.

## Steps to reproduce the bug

1. Make a dataset of 3 strings.
2. Tokenize via `Dataset.map` with `num_proc = 8`
3. Inspect the `overflow_to_sample_mapping` field

(A minimal sketch of these steps follows this record.)

## Expected results

`[0, 1, 2]`

## Actual results

`[0, 0, 0]`

Notes:

1. I have not yet extracted a minimal example, but the above works reliably
2. If the dataset is large, I've yet to determine if this bug still happens
   a. not at all
   b. always
   c. on the small, leftover batch at the end.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4755/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4755/timeline
null
null
null
null
false
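A minimal sketch of the reproduction steps listed in issue 4755 above, assuming a fast tokenizer; the model name, texts, and `max_length` are illustrative placeholders:

```python
from datasets import Dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
ds = Dataset.from_dict({"text": ["first sample", "second sample", "third sample"]})

def tokenize(batch):
    # return_overflowing_tokens makes the tokenizer emit overflow_to_sample_mapping,
    # which maps each produced sequence back to its source sample *within the batch*.
    return tokenizer(
        batch["text"],
        truncation=True,
        max_length=8,
        return_overflowing_tokens=True,
    )

# With num_proc > 1 each worker sees only its own shard, so per the report the
# collated mapping reads [0, 0, 0] instead of the expected [0, 1, 2].
out = ds.map(tokenize, batched=True, num_proc=3, remove_columns=["text"])
print(out["overflow_to_sample_mapping"])
```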
https://api.github.com/repos/huggingface/datasets/issues/4752
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4752/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4752/comments
https://api.github.com/repos/huggingface/datasets/issues/4752/events
https://github.com/huggingface/datasets/issues/4752
1,319,464,409
I_kwDODunzps5OpW3Z
4,752
DatasetInfo issue when testing multiple configs: mixed task_templates
{ "login": "BramVanroy", "id": 2779410, "node_id": "MDQ6VXNlcjI3Nzk0MTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2779410?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BramVanroy", "html_url": "https://github.com/BramVanroy", "followers_url": "https://api.github.com/users/BramVanroy/followers", "following_url": "https://api.github.com/users/BramVanroy/following{/other_user}", "gists_url": "https://api.github.com/users/BramVanroy/gists{/gist_id}", "starred_url": "https://api.github.com/users/BramVanroy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BramVanroy/subscriptions", "organizations_url": "https://api.github.com/users/BramVanroy/orgs", "repos_url": "https://api.github.com/users/BramVanroy/repos", "events_url": "https://api.github.com/users/BramVanroy/events{/privacy}", "received_events_url": "https://api.github.com/users/BramVanroy/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
3
"2022-07-27T12:04:54"
"2022-08-08T18:20:50"
null
CONTRIBUTOR
null
## Describe the bug

When running the `datasets-cli test` it would seem that some config properties in a DatasetInfo get mangled, leading to issues, e.g., about the ClassLabel.

## Steps to reproduce the bug

In summary, what I want to do is create three configs:

- unfiltered: no classlabel, no tasks. Gets data from unfiltered.json.gz (I'd want this without splits, just one chunk of data, but that does not seem possible?)
- filtered_sentiment: `review_sentiment` as ClassLabel, TextClassification task with `review_sentiment` as label. Gets train/test split from respective json.gz files
- filtered_rating: `review_rating0` as ClassLabel, TextClassification task with `review_rating0` as label. Gets train/test split from respective json.gz files

This might be a bit tedious to reproduce, so I am sorry, but these are the steps:

- Clone datasets -> `datasets/` and install it
- Clone `https://huggingface.co/datasets/BramVanroy/hebban-reviews` into `datasets/datasets` so that you have a new folder `datasets/datasets/hebban-reviews/`.
- Replace the HebbanReviews class with this new one:

```python
class HebbanReviews(datasets.GeneratorBasedBuilder):
    """The Hebban book reviews dataset."""

    BUILDER_CONFIGS = [
        HebbanReviewsConfig(
            name="unfiltered",
            description=_HEBBAN_REVIEWS_UNFILTERED_DESCRIPTION,
            version=datasets.Version(_HEBBAN_VERSION)
        ),
        HebbanReviewsConfig(
            name="filtered_sentiment",
            description=f"This config has the negative, neutral, and positive sentiment scores as ClassLabel in the 'review_sentiment' column.\n{_HEBBAN_REVIEWS_FILTERED_DESCRIPTION}",
            version=datasets.Version(_HEBBAN_VERSION)
        ),
        HebbanReviewsConfig(
            name="filtered_rating",
            description=f"This config has the 5-class ratings as ClassLabel in the 'review_rating0' column (which is a variant of 'review_rating' that starts counting from 0 instead of 1).\n{_HEBBAN_REVIEWS_FILTERED_DESCRIPTION}",
            version=datasets.Version(_HEBBAN_VERSION)
        )
    ]

    DEFAULT_CONFIG_NAME = "filtered_sentiment"

    _URLS = {
        "train": "train.jsonl.gz",
        "test": "test.jsonl.gz",
        "unfiltered": "unfiltered.jsonl.gz",
    }

    def _info(self):
        features = {
            "review_title": datasets.Value("string"),
            "review_text": datasets.Value("string"),
            "review_text_without_quotes": datasets.Value("string"),
            "review_n_quotes": datasets.Value("int32"),
            "review_n_tokens": datasets.Value("int32"),
            "review_rating": datasets.Value("int32"),
            "review_rating0": datasets.Value("int32"),
            "review_author_url": datasets.Value("string"),
            "review_author_type": datasets.Value("string"),
            "review_n_likes": datasets.Value("int32"),
            "review_n_comments": datasets.Value("int32"),
            "review_url": datasets.Value("string"),
            "review_published_date": datasets.Value("string"),
            "review_crawl_date": datasets.Value("string"),
            "lid": datasets.Value("string"),
            "lid_probability": datasets.Value("float32"),
            "review_sentiment": datasets.features.ClassLabel(names=["negative", "neutral", "positive"]),
            "review_sentiment_label": datasets.Value("string"),
            "book_id": datasets.Value("int32"),
        }

        if self.config.name == "filtered_sentiment":
            task_templates = [datasets.TextClassification(text_column="review_text_without_quotes", label_column="review_sentiment")]
        elif self.config.name == "filtered_rating":
            # For CrossEntropy, our classes need to start at index 0 -- not 1
            features["review_rating0"] = datasets.features.ClassLabel(names=["1", "2", "3", "4", "5"])
            features["review_sentiment"] = datasets.Value("int32")
            task_templates = [datasets.TextClassification(text_column="review_text_without_quotes", label_column="review_rating0")]
        elif self.config.name == "unfiltered":  # no ClassLabels in unfiltered
            features["review_sentiment"] = datasets.Value("int32")
            task_templates = None
        else:
            raise ValueError(f"Unsupported config {self.config.name}. Expected one of 'filtered_sentiment' (default),"
                             f" 'filtered_rating', or 'unfiltered'")

        print("AT INFO", self.config.name, task_templates)
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
            homepage="https://huggingface.co/datasets/BramVanroy/hebban-reviews",
            citation=_HEBBAN_REVIEWS_CITATION,
            task_templates=task_templates,
            license="cc-by-4.0"
        )

    def _split_generators(self, dl_manager):
        if self.config.name.startswith("filtered"):
            files = dl_manager.download_and_extract({"train": "train.jsonl.gz", "test": "test.jsonl.gz"})
            return [
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_file": files["train"]}),
                datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"data_file": files["test"]}),
            ]
        elif self.config.name == "unfiltered":
            files = dl_manager.download_and_extract({"train": "unfiltered.jsonl.gz"})
            return [
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_file": files["train"]}),
            ]
        else:
            raise ValueError(f"Unsupported config {self.config.name}. Expected one of 'filtered_sentiment' (default),"
                             f" 'filtered_rating', or 'unfiltered'")

    def _generate_examples(self, data_file):
        lines = Path(data_file).open(encoding="utf-8").readlines()
        for line_idx, line in enumerate(lines):
            row = json.loads(line)
            yield line_idx, row
```

- finally, run `datasets-cli test ./datasets/hebban-reviews/ --save_infos --all_configs` from within the topmost `datasets` directory

## Expected results

Succeeding tests for three different configs.

## Actual results

I printed out the values that are given to `DatasetInfo` for config name and task_templates, as you can see. There, as expected, I get `unfiltered None`. I also modified datasets/info.py and added this line [at L.170](https://github.com/huggingface/datasets/blob/f5847a304aa1b38b3a3c54a8318b4df60f1299bc/src/datasets/info.py#L170):

```python
print("INTERNALLY AT INFO.PY", self.config_name, self.task_templates)
```

To my surprise, here I get `unfiltered [TextClassification(task='text-classification', text_column='review_text_without_quotes', label_column='review_sentiment')]`. So one way or another, here I suddenly see that `unfiltered` now does have a task_template -- even though that is not what is written in the data loading script, as the first print statement correctly shows. I do not quite understand how, but it seems that the config name and task_templates get mixed.

This ultimately leads to the following error, but this trace may not be very useful in itself:

```
Traceback (most recent call last):
  File "C:\Users\bramv\.virtualenvs\hebban-U6poXNQd\Scripts\datasets-cli-script.py", line 33, in <module>
    sys.exit(load_entry_point('datasets', 'console_scripts', 'datasets-cli')())
  File "c:\dev\python\hebban\datasets\src\datasets\commands\datasets_cli.py", line 39, in main
    service.run()
  File "c:\dev\python\hebban\datasets\src\datasets\commands\test.py", line 144, in run
    builder.as_dataset()
  File "c:\dev\python\hebban\datasets\src\datasets\builder.py", line 899, in as_dataset
    datasets = map_nested(
  File "c:\dev\python\hebban\datasets\src\datasets\utils\py_utils.py", line 393, in map_nested
    mapped = [
  File "c:\dev\python\hebban\datasets\src\datasets\utils\py_utils.py", line 394, in <listcomp>
    _single_map_nested((function, obj, types, None, True, None))
  File "c:\dev\python\hebban\datasets\src\datasets\utils\py_utils.py", line 330, in _single_map_nested
    return function(data_struct)
  File "c:\dev\python\hebban\datasets\src\datasets\builder.py", line 930, in _build_single_dataset
    ds = self._as_dataset(
  File "c:\dev\python\hebban\datasets\src\datasets\builder.py", line 1006, in _as_dataset
    return Dataset(fingerprint=fingerprint, **dataset_kwargs)
  File "c:\dev\python\hebban\datasets\src\datasets\arrow_dataset.py", line 661, in __init__
    info = info.copy() if info is not None else DatasetInfo()
  File "c:\dev\python\hebban\datasets\src\datasets\info.py", line 286, in copy
    return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
  File "<string>", line 20, in __init__
  File "c:\dev\python\hebban\datasets\src\datasets\info.py", line 176, in __post_init__
    self.task_templates = [
  File "c:\dev\python\hebban\datasets\src\datasets\info.py", line 177, in <listcomp>
    template.align_with_features(self.features) for template in (self.task_templates)
  File "c:\dev\python\hebban\datasets\src\datasets\tasks\text_classification.py", line 22, in align_with_features
    raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
ValueError: Column review_sentiment is not a ClassLabel.
```

## Environment info

- `datasets` version: 2.4.1.dev0
- Platform: Windows-10-10.0.19041-SP0
- Python version: 3.8.8
- PyArrow version: 8.0.0
- Pandas version: 1.4.3
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4752/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4752/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4750
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4750/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4750/comments
https://api.github.com/repos/huggingface/datasets/issues/4750/events
https://github.com/huggingface/datasets/issues/4750
1,319,333,645
I_kwDODunzps5Oo28N
4,750
Easily create loading script for benchmark comprising multiple huggingface datasets
{ "login": "JoelNiklaus", "id": 3775944, "node_id": "MDQ6VXNlcjM3NzU5NDQ=", "avatar_url": "https://avatars.githubusercontent.com/u/3775944?v=4", "gravatar_id": "", "url": "https://api.github.com/users/JoelNiklaus", "html_url": "https://github.com/JoelNiklaus", "followers_url": "https://api.github.com/users/JoelNiklaus/followers", "following_url": "https://api.github.com/users/JoelNiklaus/following{/other_user}", "gists_url": "https://api.github.com/users/JoelNiklaus/gists{/gist_id}", "starred_url": "https://api.github.com/users/JoelNiklaus/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JoelNiklaus/subscriptions", "organizations_url": "https://api.github.com/users/JoelNiklaus/orgs", "repos_url": "https://api.github.com/users/JoelNiklaus/repos", "events_url": "https://api.github.com/users/JoelNiklaus/events{/privacy}", "received_events_url": "https://api.github.com/users/JoelNiklaus/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
2
"2022-07-27T10:13:38"
"2022-07-27T13:58:07"
"2022-07-27T13:58:07"
CONTRIBUTOR
null
Hi,

I would like to create a loading script for a benchmark comprising multiple Hugging Face datasets. The function `_split_generators` needs to return the files for the respective dataset, but the files are not always in the same location for each dataset. I want to just make a wrapper dataset that provides a single interface to all the underlying datasets.

I thought about downloading the files with the `load_dataset` function and then providing the link to the cached file, but this seems a bit inelegant to me. What approach would you propose for this? (A rough sketch of the wrapper idea follows this record.)

Please let me know if you have any questions.

Cheers, Joel
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4750/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4750/timeline
null
completed
null
null
false
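To make the wrapper idea from issue 4750 above concrete, here is a rough sketch. The component dataset names are placeholders, and calling `load_dataset` inside a builder is exactly the approach the author calls inelegant; it is shown only to illustrate the shape, not as the recommended solution:

```python
import datasets

_COMPONENTS = ["imdb", "ag_news"]  # hypothetical component datasets

class BenchmarkWrapper(datasets.GeneratorBasedBuilder):
    """One config per underlying Hub dataset, re-yielded through a single interface."""

    BUILDER_CONFIGS = [datasets.BuilderConfig(name=name) for name in _COMPONENTS]

    def _info(self):
        return datasets.DatasetInfo(description="Benchmark wrapping several Hub datasets.")

    def _split_generators(self, dl_manager):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"split": "train"}),
        ]

    def _generate_examples(self, split):
        # Delegates the actual download and caching to load_dataset.
        inner = datasets.load_dataset(self.config.name, split=split)
        for idx, row in enumerate(inner):
            yield idx, row
```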
https://api.github.com/repos/huggingface/datasets/issues/4746
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4746/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4746/comments
https://api.github.com/repos/huggingface/datasets/issues/4746/events
https://github.com/huggingface/datasets/issues/4746
1,318,486,599
I_kwDODunzps5OloJH
4,746
Dataset Viewer issue for yanekyuk/wikikey
{ "login": "ai-ashok", "id": 91247690, "node_id": "MDQ6VXNlcjkxMjQ3Njkw", "avatar_url": "https://avatars.githubusercontent.com/u/91247690?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ai-ashok", "html_url": "https://github.com/ai-ashok", "followers_url": "https://api.github.com/users/ai-ashok/followers", "following_url": "https://api.github.com/users/ai-ashok/following{/other_user}", "gists_url": "https://api.github.com/users/ai-ashok/gists{/gist_id}", "starred_url": "https://api.github.com/users/ai-ashok/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ai-ashok/subscriptions", "organizations_url": "https://api.github.com/users/ai-ashok/orgs", "repos_url": "https://api.github.com/users/ai-ashok/repos", "events_url": "https://api.github.com/users/ai-ashok/events{/privacy}", "received_events_url": "https://api.github.com/users/ai-ashok/received_events", "type": "User", "site_admin": false }
[ { "id": 3470211881, "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer", "name": "dataset-viewer", "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co" } ]
closed
false
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[ { "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false } ]
null
2
"2022-07-26T16:25:16"
"2022-09-08T08:15:22"
"2022-09-08T08:15:22"
NONE
null
### Link

_No response_

### Description

_No response_

### Owner

_No response_
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4746/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4746/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4745
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4745/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4745/comments
https://api.github.com/repos/huggingface/datasets/issues/4745/events
https://github.com/huggingface/datasets/issues/4745
1,318,016,655
I_kwDODunzps5Oj1aP
4,745
Allow `list_datasets` to include private datasets
{ "login": "ola13", "id": 1528523, "node_id": "MDQ6VXNlcjE1Mjg1MjM=", "avatar_url": "https://avatars.githubusercontent.com/u/1528523?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ola13", "html_url": "https://github.com/ola13", "followers_url": "https://api.github.com/users/ola13/followers", "following_url": "https://api.github.com/users/ola13/following{/other_user}", "gists_url": "https://api.github.com/users/ola13/gists{/gist_id}", "starred_url": "https://api.github.com/users/ola13/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ola13/subscriptions", "organizations_url": "https://api.github.com/users/ola13/orgs", "repos_url": "https://api.github.com/users/ola13/repos", "events_url": "https://api.github.com/users/ola13/events{/privacy}", "received_events_url": "https://api.github.com/users/ola13/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
3
"2022-07-26T10:16:08"
"2022-07-26T11:59:25"
null
NONE
null
I am working with a large collection of private datasets, and it would be convenient for me to be able to list them. I would envision extending the convention of the `use_auth_token` keyword argument to the `list_datasets` function, so that calling:

```
list_datasets(use_auth_token="my_token")
```

would return the list of all datasets I have permission to view, including private ones. (A workaround sketch using `huggingface_hub` follows this record.)

The only current alternative I see is to use the Hub website to manually obtain the list of dataset names - this is in the context of BigScience, where the respective private spaces contain hundreds of datasets, so it is not very convenient to list them manually.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4745/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4745/timeline
null
null
null
null
false
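As a stopgap for the request in issue 4745 above, the Hub client library (rather than `datasets.list_datasets`) can already enumerate datasets visible to an authenticated user. A sketch follows; the exact argument names vary across `huggingface_hub` versions, and the token and organization name are placeholders:

```python
from huggingface_hub import HfApi

api = HfApi(token="hf_xxx")  # placeholder token

# Listing by author/organization; private datasets the token can access
# should be included in the results (an assumption worth verifying).
for info in api.list_datasets(author="my-private-org"):  # hypothetical org name
    print(info.id)
```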
https://api.github.com/repos/huggingface/datasets/issues/4744
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4744/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4744/comments
https://api.github.com/repos/huggingface/datasets/issues/4744/events
https://github.com/huggingface/datasets/issues/4744
1,317,822,345
I_kwDODunzps5OjF-J
4,744
Remove instructions to generate dummy data from our docs
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892861, "node_id": "MDU6TGFiZWwxOTM1ODkyODYx", "url": "https://api.github.com/repos/huggingface/datasets/labels/documentation", "name": "documentation", "color": "0075ca", "default": true, "description": "Improvements or additions to documentation" } ]
closed
false
{ "login": "stevhliu", "id": 59462357, "node_id": "MDQ6VXNlcjU5NDYyMzU3", "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stevhliu", "html_url": "https://github.com/stevhliu", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "repos_url": "https://api.github.com/users/stevhliu/repos", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "type": "User", "site_admin": false }
[ { "login": "stevhliu", "id": 59462357, "node_id": "MDQ6VXNlcjU5NDYyMzU3", "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stevhliu", "html_url": "https://github.com/stevhliu", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "repos_url": "https://api.github.com/users/stevhliu/repos", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "type": "User", "site_admin": false } ]
null
2
"2022-07-26T07:32:58"
"2022-08-02T23:50:30"
"2022-08-02T23:50:30"
MEMBER
null
In our docs, we give instructions to generate the dummy data: https://huggingface.co/docs/datasets/dataset_script#testing-data-and-checksum-metadata

However:
- dummy data makes sense only for datasets in our GitHub repo: so that we can test their loading with our CI
- for datasets on the Hub:
  - they do not pass any CI test requiring dummy data
  - there are no instructions on how they can test their dataset locally using the dummy data
  - the generation of the dummy data assumes our GitHub directory structure:
    - the dummy data will be generated under `./datasets/<dataset_name>/dummy` even if locally there is no `./datasets` directory (which is the usual case). See issue:
      - #4742

CC: @stevhliu
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4744/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4744/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4742
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4742/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4742/comments
https://api.github.com/repos/huggingface/datasets/issues/4742/events
https://github.com/huggingface/datasets/issues/4742
1,317,260,663
I_kwDODunzps5Og813
4,742
Dummy data nowhere to be found
{ "login": "BramVanroy", "id": 2779410, "node_id": "MDQ6VXNlcjI3Nzk0MTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2779410?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BramVanroy", "html_url": "https://github.com/BramVanroy", "followers_url": "https://api.github.com/users/BramVanroy/followers", "following_url": "https://api.github.com/users/BramVanroy/following{/other_user}", "gists_url": "https://api.github.com/users/BramVanroy/gists{/gist_id}", "starred_url": "https://api.github.com/users/BramVanroy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BramVanroy/subscriptions", "organizations_url": "https://api.github.com/users/BramVanroy/orgs", "repos_url": "https://api.github.com/users/BramVanroy/repos", "events_url": "https://api.github.com/users/BramVanroy/events{/privacy}", "received_events_url": "https://api.github.com/users/BramVanroy/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
3
"2022-07-25T19:18:42"
"2022-11-04T14:04:24"
"2022-11-04T14:04:10"
CONTRIBUTOR
null
## Describe the bug

To finalize my dataset, I wanted to create dummy data as per the guide and I ran

```shell
datasets-cli dummy_data datasets/hebban-reviews --auto_generate
```

where hebban-reviews is [this repo](https://huggingface.co/datasets/BramVanroy/hebban-reviews). And even though the script runs and shows a message at the end that it succeeded, I cannot find the dummy data anywhere. Where is it?

## Expected results

To see the dummy data in the datasets' folder or in the folder where I ran the command.

## Actual results

I see the following message but I cannot find the dummy data anywhere.

```
Dummy data generation done and dummy data test succeeded for config 'filtered''.
Automatic dummy data generation succeeded for all configs of '.\datasets\hebban-reviews\'
```

## Environment info

- `datasets` version: 2.4.1.dev0
- Platform: Windows-10-10.0.19041-SP0
- Python version: 3.8.8
- PyArrow version: 8.0.0
- Pandas version: 1.4.3
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4742/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4742/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4737
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4737/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4737/comments
https://api.github.com/repos/huggingface/datasets/issues/4737/events
https://github.com/huggingface/datasets/issues/4737
1,315,011,004
I_kwDODunzps5OYXm8
4,737
Download error on scene_parse_150
{ "login": "juliensimon", "id": 3436143, "node_id": "MDQ6VXNlcjM0MzYxNDM=", "avatar_url": "https://avatars.githubusercontent.com/u/3436143?v=4", "gravatar_id": "", "url": "https://api.github.com/users/juliensimon", "html_url": "https://github.com/juliensimon", "followers_url": "https://api.github.com/users/juliensimon/followers", "following_url": "https://api.github.com/users/juliensimon/following{/other_user}", "gists_url": "https://api.github.com/users/juliensimon/gists{/gist_id}", "starred_url": "https://api.github.com/users/juliensimon/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/juliensimon/subscriptions", "organizations_url": "https://api.github.com/users/juliensimon/orgs", "repos_url": "https://api.github.com/users/juliensimon/repos", "events_url": "https://api.github.com/users/juliensimon/events{/privacy}", "received_events_url": "https://api.github.com/users/juliensimon/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
"2022-07-22T13:28:28"
"2022-09-01T15:37:11"
"2022-09-01T15:37:11"
NONE
null
```python
from datasets import load_dataset

dataset = load_dataset("scene_parse_150", "scene_parsing")
```

```
FileNotFoundError: Couldn't find file at http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip
```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4737/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4737/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4736
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4736/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4736/comments
https://api.github.com/repos/huggingface/datasets/issues/4736/events
https://github.com/huggingface/datasets/issues/4736
1,314,931,996
I_kwDODunzps5OYEUc
4,736
Dataset Viewer issue for deepklarity/huggingface-spaces-dataset
{ "login": "dk-crazydiv", "id": 47515542, "node_id": "MDQ6VXNlcjQ3NTE1NTQy", "avatar_url": "https://avatars.githubusercontent.com/u/47515542?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dk-crazydiv", "html_url": "https://github.com/dk-crazydiv", "followers_url": "https://api.github.com/users/dk-crazydiv/followers", "following_url": "https://api.github.com/users/dk-crazydiv/following{/other_user}", "gists_url": "https://api.github.com/users/dk-crazydiv/gists{/gist_id}", "starred_url": "https://api.github.com/users/dk-crazydiv/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dk-crazydiv/subscriptions", "organizations_url": "https://api.github.com/users/dk-crazydiv/orgs", "repos_url": "https://api.github.com/users/dk-crazydiv/repos", "events_url": "https://api.github.com/users/dk-crazydiv/events{/privacy}", "received_events_url": "https://api.github.com/users/dk-crazydiv/received_events", "type": "User", "site_admin": false }
[ { "id": 3470211881, "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer", "name": "dataset-viewer", "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co" } ]
closed
false
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[ { "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false } ]
null
1
"2022-07-22T12:14:18"
"2022-07-22T13:46:38"
"2022-07-22T13:46:38"
NONE
null
### Link
https://huggingface.co/datasets/deepklarity/huggingface-spaces-dataset/viewer/deepklarity--huggingface-spaces-dataset/train

### Description
Hi Team, I'm getting the following error on an uploaded dataset. I've been getting the same status for a couple of hours now. The dataset size is `<1MB` and the format is CSV, so I'm not sure whether it's supposed to take this much time or not.

```
Status code: 400
Exception: Status400Error
Message: The split is being processed. Retry later.
```

Is there any explicit step to be taken to get the viewer to work?

### Owner
Yes
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4736/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4736/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4734
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4734/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4734/comments
https://api.github.com/repos/huggingface/datasets/issues/4734/events
https://github.com/huggingface/datasets/issues/4734
1,314,495,382
I_kwDODunzps5OWZuW
4,734
Package rouge-score cannot be imported
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
1
"2022-07-22T07:15:05"
"2022-07-22T07:45:19"
"2022-07-22T07:45:18"
MEMBER
null
## Describe the bug
After today's release of `rouge_score-0.0.7`, the package can no longer be imported. Our CI fails: https://github.com/huggingface/datasets/runs/7463218591?check_suite_focus=true

```
FAILED tests/test_dataset_common.py::LocalDatasetTest::test_builder_class_bigbench
FAILED tests/test_dataset_common.py::LocalDatasetTest::test_builder_configs_bigbench
FAILED tests/test_dataset_common.py::LocalDatasetTest::test_load_dataset_bigbench
FAILED tests/test_metric_common.py::LocalMetricTest::test_load_metric_rouge
```

with errors:

```
> from rouge_score import rouge_scorer
E ModuleNotFoundError: No module named 'rouge_score'
```

```
E ImportError: To be able to use rouge, you need to install the following dependency: rouge_score.
E Please install it using 'pip install rouge_score' for instance'
```
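For CI, a hedged guard like the following (the version number is taken from the report above; the test name is illustrative) can keep the rest of the suite green until a fixed release lands.

```python
# Hedged CI guard sketch: skip the rouge tests when the broken
# rouge_score release (0.0.7, per the report above) is installed.
import importlib.metadata

import pytest

ROUGE_VERSION = importlib.metadata.version("rouge_score")


@pytest.mark.skipif(
    ROUGE_VERSION == "0.0.7",
    reason="rouge_score 0.0.7 cannot be imported (missing modules)",
)
def test_rouge_import():
    from rouge_score import rouge_scorer, scoring  # noqa: F401
```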
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4734/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4734/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4733
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4733/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4733/comments
https://api.github.com/repos/huggingface/datasets/issues/4733/events
https://github.com/huggingface/datasets/issues/4733
1,314,479,616
I_kwDODunzps5OWV4A
4,733
rouge metric
{ "login": "asking28", "id": 29248466, "node_id": "MDQ6VXNlcjI5MjQ4NDY2", "avatar_url": "https://avatars.githubusercontent.com/u/29248466?v=4", "gravatar_id": "", "url": "https://api.github.com/users/asking28", "html_url": "https://github.com/asking28", "followers_url": "https://api.github.com/users/asking28/followers", "following_url": "https://api.github.com/users/asking28/following{/other_user}", "gists_url": "https://api.github.com/users/asking28/gists{/gist_id}", "starred_url": "https://api.github.com/users/asking28/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/asking28/subscriptions", "organizations_url": "https://api.github.com/users/asking28/orgs", "repos_url": "https://api.github.com/users/asking28/repos", "events_url": "https://api.github.com/users/asking28/events{/privacy}", "received_events_url": "https://api.github.com/users/asking28/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
1
"2022-07-22T07:06:51"
"2022-07-22T09:08:02"
"2022-07-22T09:05:35"
NONE
null
## Describe the bug
Loading the ROUGE metric gives an error after the latest rouge-score==0.0.7 release. Downgrading to rouge-score==0.0.4 works fine.

## Steps to reproduce the bug
```python
from rouge_score import rouge_scorer, scoring
```

## Expected results
`from rouge_score import rouge_scorer, scoring` should run without error.

## Actual results
```
File "/root/.cache/huggingface/modules/datasets_modules/metrics/rouge/0ffdb60f436bdb8884d5e4d608d53dbe108e82dac4f494a66f80ef3f647c104f/rouge.py", line 21, in <module>
    from rouge_score import rouge_scorer, scoring
ImportError: cannot import name 'rouge_scorer' from 'rouge_score' (unknown location)
```

## Environment info
- `datasets` version:
- Platform: Linux
- Python version: 3.9
- PyArrow version:
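A minimal sketch of the downgrade workaround described above, assuming the 0.0.4 release is still installable from PyPI:

```python
# Hedged workaround sketch: pin rouge_score to the last known-good release.
import subprocess
import sys

subprocess.check_call(
    [sys.executable, "-m", "pip", "install", "rouge_score==0.0.4"]
)

# The import that fails under 0.0.7 should now succeed.
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"], use_stemmer=True)
print(scorer.score("the cat sat on the mat", "the cat sat on a mat"))
```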
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4733/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4733/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4732
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4732/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4732/comments
https://api.github.com/repos/huggingface/datasets/issues/4732/events
https://github.com/huggingface/datasets/issues/4732
1,314,371,566
I_kwDODunzps5OV7fu
4,732
Document better that loading a dataset passing its name does not use the local script
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892861, "node_id": "MDU6TGFiZWwxOTM1ODkyODYx", "url": "https://api.github.com/repos/huggingface/datasets/labels/documentation", "name": "documentation", "color": "0075ca", "default": true, "description": "Improvements or additions to documentation" } ]
closed
false
null
[]
null
3
"2022-07-22T06:07:31"
"2022-08-23T16:32:23"
"2022-08-23T16:32:23"
MEMBER
null
As reported by @TrentBrick here https://github.com/huggingface/datasets/issues/4725#issuecomment-1191858596, it could be made clearer that loading a dataset by passing its name does not use the (modified) local copy of its loading script.

What he did:
- he installed `datasets` from source
- he modified the `datasets/the_pile/the_pile.py` loading script locally
- he tried to load it, but used `load_dataset("the_pile")` instead of `load_dataset("datasets/the_pile")`
- as explained here https://github.com/huggingface/datasets/issues/4725#issuecomment-1191040245:
  - the former does not use the local script; instead, it downloads a copy of `the_pile.py` from our GitHub, caches it locally (inside `~/.cache/huggingface/modules`) and uses that.

He suggests adding a clearer explanation about this, maybe in [Installation > source](https://huggingface.co/docs/datasets/installation).

CC: @stevhliu
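For illustration, a hedged sketch of the two call forms being contrasted (the local path and the streaming flag are illustrative):

```python
from datasets import load_dataset

# Bare name: datasets fetches the canonical the_pile.py from GitHub and
# caches it under ~/.cache/huggingface/modules, so local edits are ignored.
ds_remote = load_dataset("the_pile", split="train", streaming=True)

# Explicit path: the (possibly modified) local script is what actually runs.
ds_local = load_dataset("./datasets/the_pile", split="train", streaming=True)
```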
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4732/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4732/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4730
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4730/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4730/comments
https://api.github.com/repos/huggingface/datasets/issues/4730/events
https://github.com/huggingface/datasets/issues/4730
1,313,421,263
I_kwDODunzps5OSTfP
4,730
Loading imagenet-1k validation split takes much more RAM than expected
{ "login": "fxmarty", "id": 9808326, "node_id": "MDQ6VXNlcjk4MDgzMjY=", "avatar_url": "https://avatars.githubusercontent.com/u/9808326?v=4", "gravatar_id": "", "url": "https://api.github.com/users/fxmarty", "html_url": "https://github.com/fxmarty", "followers_url": "https://api.github.com/users/fxmarty/followers", "following_url": "https://api.github.com/users/fxmarty/following{/other_user}", "gists_url": "https://api.github.com/users/fxmarty/gists{/gist_id}", "starred_url": "https://api.github.com/users/fxmarty/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/fxmarty/subscriptions", "organizations_url": "https://api.github.com/users/fxmarty/orgs", "repos_url": "https://api.github.com/users/fxmarty/repos", "events_url": "https://api.github.com/users/fxmarty/events{/privacy}", "received_events_url": "https://api.github.com/users/fxmarty/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
1
"2022-07-21T15:14:06"
"2022-07-21T16:41:04"
"2022-07-21T16:41:04"
CONTRIBUTOR
null
## Describe the bug
Loading the validation split of imagenet-1k into memory takes much more RAM than expected. Assuming ImageNet-1k is 150 GB in total, with 50,000 validation images and 1,281,167 train images, I would expect only about 6 GB to be loaded in RAM.

## Steps to reproduce the bug
```python
from datasets import load_dataset

dataset = load_dataset("imagenet-1k", split="validation")

print(dataset)
"""prints
Dataset({
    features: ['image', 'label'],
    num_rows: 50000
})
"""

pipe_inputs = dataset["image"]
# and wait :-)
```

## Expected results
Use only < 10 GB of RAM when loading the images.

## Actual results
![image](https://user-images.githubusercontent.com/9808326/180249183-62f75ca4-d127-402a-9330-f12825a22b0a.png)

```
Using custom data configuration default
Reusing dataset imagenet-1k (/home/fxmarty/.cache/huggingface/datasets/imagenet-1k/default/1.0.0/a1e9bfc56c3a7350165007d1176b15e9128fcaf9ab972147840529aed3ae52bc)
Killed
```

## Environment info
- `datasets` version: 2.3.3.dev0
- Platform: Linux-5.15.0-41-generic-x86_64-with-glibc2.35
- Python version: 3.9.12
- PyArrow version: 7.0.0
- Pandas version: 1.3.5
- datasets commit: 4e4222f1b6362c2788aec0dd2cd8cede6dd17b80
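As a hedged aside, row-by-row iteration avoids materializing the whole column at once; a sketch assuming the `image` feature is decoded per row on access:

```python
from datasets import load_dataset

dataset = load_dataset("imagenet-1k", split="validation")

# dataset["image"] decodes all 50,000 images into RAM at once, which is
# what triggers the blow-up above. Iterating decodes one row at a time:
for example in dataset:
    image = example["image"]  # a PIL image, decoded on access
    ...  # run the pipeline on `image`, then let it be garbage-collected
```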
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4730/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4730/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4728
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4728/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4728/comments
https://api.github.com/repos/huggingface/datasets/issues/4728/events
https://github.com/huggingface/datasets/issues/4728
1,312,897,454
I_kwDODunzps5OQTmu
4,728
load_dataset gives "403" error when using Financial Phrasebank
{ "login": "rohitvincent", "id": 2209134, "node_id": "MDQ6VXNlcjIyMDkxMzQ=", "avatar_url": "https://avatars.githubusercontent.com/u/2209134?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rohitvincent", "html_url": "https://github.com/rohitvincent", "followers_url": "https://api.github.com/users/rohitvincent/followers", "following_url": "https://api.github.com/users/rohitvincent/following{/other_user}", "gists_url": "https://api.github.com/users/rohitvincent/gists{/gist_id}", "starred_url": "https://api.github.com/users/rohitvincent/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rohitvincent/subscriptions", "organizations_url": "https://api.github.com/users/rohitvincent/orgs", "repos_url": "https://api.github.com/users/rohitvincent/repos", "events_url": "https://api.github.com/users/rohitvincent/events{/privacy}", "received_events_url": "https://api.github.com/users/rohitvincent/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
3
"2022-07-21T08:43:32"
"2022-08-04T08:32:35"
"2022-08-04T08:32:35"
NONE
null
I tried both snippets below to download the financial phrasebank dataset (https://huggingface.co/datasets/financial_phrasebank) with the sentences_allagree subset. However, the code gives a 403 error when executed from multiple machines, locally or on the cloud.

```python
from datasets import load_dataset, DownloadMode

load_dataset(path='financial_phrasebank', name='sentences_allagree', download_mode=DownloadMode.FORCE_REDOWNLOAD)
```

```python
from datasets import load_dataset, DownloadMode

load_dataset(path='financial_phrasebank', name='sentences_allagree')
```

**Error**
```
ConnectionError: Couldn't reach https://www.researchgate.net/profile/Pekka_Malo/publication/251231364_FinancialPhraseBank-v10/data/0c96051eee4fb1d56e000000/FinancialPhraseBank-v10.zip (error 403)
```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4728/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4728/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4727
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4727/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4727/comments
https://api.github.com/repos/huggingface/datasets/issues/4727/events
https://github.com/huggingface/datasets/issues/4727
1,312,645,391
I_kwDODunzps5OPWEP
4,727
Dataset Viewer issue for TheNoob3131/mosquito-data
{ "login": "thenerd31", "id": 53668030, "node_id": "MDQ6VXNlcjUzNjY4MDMw", "avatar_url": "https://avatars.githubusercontent.com/u/53668030?v=4", "gravatar_id": "", "url": "https://api.github.com/users/thenerd31", "html_url": "https://github.com/thenerd31", "followers_url": "https://api.github.com/users/thenerd31/followers", "following_url": "https://api.github.com/users/thenerd31/following{/other_user}", "gists_url": "https://api.github.com/users/thenerd31/gists{/gist_id}", "starred_url": "https://api.github.com/users/thenerd31/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thenerd31/subscriptions", "organizations_url": "https://api.github.com/users/thenerd31/orgs", "repos_url": "https://api.github.com/users/thenerd31/repos", "events_url": "https://api.github.com/users/thenerd31/events{/privacy}", "received_events_url": "https://api.github.com/users/thenerd31/received_events", "type": "User", "site_admin": false }
[ { "id": 3470211881, "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer", "name": "dataset-viewer", "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co" } ]
closed
false
null
[]
null
1
"2022-07-21T05:24:48"
"2022-07-21T07:51:56"
"2022-07-21T07:45:01"
NONE
null
### Link
https://huggingface.co/datasets/TheNoob3131/mosquito-data/viewer/TheNoob3131--mosquito-data/test

### Description
The dataset preview is not showing for large files: it says 'split cache is empty' even though there are train and test splits.

### Owner
_No response_
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4727/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4727/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4725
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4725/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4725/comments
https://api.github.com/repos/huggingface/datasets/issues/4725/events
https://github.com/huggingface/datasets/issues/4725
1,311,907,096
I_kwDODunzps5OMh0Y
4,725
the_pile datasets URL broken.
{ "login": "TrentBrick", "id": 12433427, "node_id": "MDQ6VXNlcjEyNDMzNDI3", "avatar_url": "https://avatars.githubusercontent.com/u/12433427?v=4", "gravatar_id": "", "url": "https://api.github.com/users/TrentBrick", "html_url": "https://github.com/TrentBrick", "followers_url": "https://api.github.com/users/TrentBrick/followers", "following_url": "https://api.github.com/users/TrentBrick/following{/other_user}", "gists_url": "https://api.github.com/users/TrentBrick/gists{/gist_id}", "starred_url": "https://api.github.com/users/TrentBrick/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/TrentBrick/subscriptions", "organizations_url": "https://api.github.com/users/TrentBrick/orgs", "repos_url": "https://api.github.com/users/TrentBrick/repos", "events_url": "https://api.github.com/users/TrentBrick/events{/privacy}", "received_events_url": "https://api.github.com/users/TrentBrick/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
5
"2022-07-20T20:57:30"
"2022-07-22T06:09:46"
"2022-07-21T07:38:19"
NONE
null
https://github.com/huggingface/datasets/pull/3627 changed the Eleuther AI Pile dataset URL from https://the-eye.eu/ to https://mystic.the-eye.eu/, but the latter is now broken and the former works again. Note that when I git clone the repo, run `pip install -e .`, and then edit the URL back, the codebase doesn't seem to pick up this edit, so the mystic URL must also be cached somewhere else that I can't find?
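A hedged sketch of where that second cache lives and how to work around it; the cache layout is an assumption and may vary across `datasets` versions:

```python
import shutil
from pathlib import Path

from datasets import load_dataset

# Assumed cache layout: the bare-name form keeps its copy of the_pile.py
# under the modules cache, so deleting it forces a fresh copy next load.
cached = Path.home() / ".cache/huggingface/modules/datasets_modules/datasets/the_pile"
if cached.exists():
    shutil.rmtree(cached)

# Or sidestep the module cache entirely and run the edited local script:
ds = load_dataset("./datasets/the_pile/the_pile.py", split="train", streaming=True)
```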
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4725/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4725/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4721
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4721/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4721/comments
https://api.github.com/repos/huggingface/datasets/issues/4721/events
https://github.com/huggingface/datasets/issues/4721
1,310,253,552
I_kwDODunzps5OGOHw
4,721
PyArrow Dataset error when calling `load_dataset`
{ "login": "piraka9011", "id": 16828657, "node_id": "MDQ6VXNlcjE2ODI4NjU3", "avatar_url": "https://avatars.githubusercontent.com/u/16828657?v=4", "gravatar_id": "", "url": "https://api.github.com/users/piraka9011", "html_url": "https://github.com/piraka9011", "followers_url": "https://api.github.com/users/piraka9011/followers", "following_url": "https://api.github.com/users/piraka9011/following{/other_user}", "gists_url": "https://api.github.com/users/piraka9011/gists{/gist_id}", "starred_url": "https://api.github.com/users/piraka9011/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/piraka9011/subscriptions", "organizations_url": "https://api.github.com/users/piraka9011/orgs", "repos_url": "https://api.github.com/users/piraka9011/repos", "events_url": "https://api.github.com/users/piraka9011/events{/privacy}", "received_events_url": "https://api.github.com/users/piraka9011/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
3
"2022-07-20T01:16:03"
"2022-07-22T14:11:47"
null
NONE
null
## Describe the bug
I am fine-tuning a wav2vec2 model on my own dataset, following this script: https://github.com/huggingface/transformers/blob/main/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py

Loading my Audio dataset from the Hub, which was originally generated from disk, results in the following PyArrow error:

```sh
  File "/home/ubuntu/w2v2/run_speech_recognition_ctc.py", line 227, in main
    raw_datasets = load_dataset(
  File "/home/ubuntu/.virtualenvs/meval/lib/python3.8/site-packages/datasets/load.py", line 1679, in load_dataset
    builder_instance.download_and_prepare(
  File "/home/ubuntu/.virtualenvs/meval/lib/python3.8/site-packages/datasets/builder.py", line 704, in download_and_prepare
    self._download_and_prepare(
  File "/home/ubuntu/.virtualenvs/meval/lib/python3.8/site-packages/datasets/builder.py", line 793, in _download_and_prepare
    self._prepare_split(split_generator, **prepare_split_kwargs)
  File "/home/ubuntu/.virtualenvs/meval/lib/python3.8/site-packages/datasets/builder.py", line 1268, in _prepare_split
    for key, table in logging.tqdm(
  File "/home/ubuntu/.virtualenvs/meval/lib/python3.8/site-packages/tqdm/std.py", line 1195, in __iter__
    for obj in iterable:
  File "/home/ubuntu/.virtualenvs/meval/lib/python3.8/site-packages/datasets/packaged_modules/parquet/parquet.py", line 68, in _generate_tables
    for batch_idx, record_batch in enumerate(
  File "pyarrow/_parquet.pyx", line 1309, in iter_batches
  File "pyarrow/error.pxi", line 121, in pyarrow.lib.check_status
pyarrow.lib.ArrowNotImplementedError: Nested data conversions not implemented for chunked array outputs
```

## Steps to reproduce the bug
I created a dataset from a JSON lines manifest of `audio_filepath`, `text`, and `duration`. When creating the dataset, I do something like this:

```python
import json

from datasets import Audio, Dataset

# manifest_lines is a list of JSON lines, each with "audio_filepath",
# "duration" and "text"; collect the fields into columns
manifest_dict = {"audio": [], "duration": [], "transcription": []}
for line in manifest_lines:
    line = line.strip()
    if line:
        line_dict = json.loads(line)
        manifest_dict["audio"].append(f"{root_path}/{line_dict['audio_filepath']}")
        manifest_dict["duration"].append(line_dict["duration"])
        manifest_dict["transcription"].append(line_dict["text"])

# Create a HF dataset
dataset = Dataset.from_dict(manifest_dict).cast_column(
    "audio",
    Audio(sampling_rate=16_000),
)

# From the docs for saving to disk
# https://huggingface.co/docs/datasets/v2.3.2/en/package_reference/main_classes#datasets.Dataset.save_to_disk
def read_audio_file(example):
    with open(example["audio"]["path"], "rb") as f:
        return {"audio": {"bytes": f.read()}}

dataset = dataset.map(read_audio_file, num_proc=70)
dataset.save_to_disk(f"/audio-data/hf/{artifact_name}")
dataset.push_to_hub(f"{org_name}/{artifact_name}", max_shard_size="5GB", private=True)
```

Then, when I call `load_dataset()` in my training script with the same dataset generated above, downloading from the Hugging Face Hub, I get the above stack trace. I am able to load the dataset fine if I use `load_from_disk()`.

## Expected results
`load_dataset()` should behave just like `load_from_disk()` and not cause any errors.

## Actual results
See above.

## Environment info
I am using the `huggingface/transformers-pytorch-gpu:latest` image.
- `datasets` version: 2.3.0
- Platform: Docker/Ubuntu 20.04
- Python version: 3.8
- PyArrow version: 8.0.0
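For what it's worth, a hedged sketch (not a verified fix) of one workaround sometimes suggested for this PyArrow limitation: re-pushing with smaller shards, so the nested audio column is less likely to end up as a chunked array within a single Parquet row group.

```python
# Hedged sketch, not a verified fix: smaller shards keep each Parquet file
# (and its row groups) small enough that the nested "audio" column may
# avoid the chunked-array path behind ArrowNotImplementedError.
dataset.push_to_hub(
    "org-name/artifact-name",  # hypothetical repo id
    max_shard_size="500MB",    # much smaller than the original 5GB shards
    private=True,
)
```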
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4721/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4721/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4720
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4720/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4720/comments
https://api.github.com/repos/huggingface/datasets/issues/4720/events
https://github.com/huggingface/datasets/issues/4720
1,309,980,195
I_kwDODunzps5OFLYj
4,720
Dataset Viewer issue for shamikbose89/lancaster_newsbooks
{ "login": "shamikbose", "id": 50837285, "node_id": "MDQ6VXNlcjUwODM3Mjg1", "avatar_url": "https://avatars.githubusercontent.com/u/50837285?v=4", "gravatar_id": "", "url": "https://api.github.com/users/shamikbose", "html_url": "https://github.com/shamikbose", "followers_url": "https://api.github.com/users/shamikbose/followers", "following_url": "https://api.github.com/users/shamikbose/following{/other_user}", "gists_url": "https://api.github.com/users/shamikbose/gists{/gist_id}", "starred_url": "https://api.github.com/users/shamikbose/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/shamikbose/subscriptions", "organizations_url": "https://api.github.com/users/shamikbose/orgs", "repos_url": "https://api.github.com/users/shamikbose/repos", "events_url": "https://api.github.com/users/shamikbose/events{/privacy}", "received_events_url": "https://api.github.com/users/shamikbose/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
4
"2022-07-19T20:00:07"
"2022-09-08T16:47:21"
"2022-09-08T16:47:21"
NONE
null
### Link
https://huggingface.co/datasets/shamikbose89/lancaster_newsbooks

### Description
Status code: 400
Exception: ValueError
Message: Cannot seek streaming HTTP file

I am able to use the dataset loading script locally, and it also runs when I'm using the one from the hub, but the viewer still doesn't load.

### Owner
Yes
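Since the viewer reads datasets in streaming mode, the error can usually be reproduced outside the viewer; a hedged repro sketch:

```python
from datasets import load_dataset

# If the loading script seeks on a remote file, streaming mode should
# raise the same "Cannot seek streaming HTTP file" ValueError locally.
ds = load_dataset("shamikbose89/lancaster_newsbooks", split="train", streaming=True)
print(next(iter(ds)))
```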
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4720/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4720/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4719
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4719/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4719/comments
https://api.github.com/repos/huggingface/datasets/issues/4719/events
https://github.com/huggingface/datasets/issues/4719
1,309,854,492
I_kwDODunzps5OEssc
4,719
Issue loading TheNoob3131/mosquito-data dataset
{ "login": "thenerd31", "id": 53668030, "node_id": "MDQ6VXNlcjUzNjY4MDMw", "avatar_url": "https://avatars.githubusercontent.com/u/53668030?v=4", "gravatar_id": "", "url": "https://api.github.com/users/thenerd31", "html_url": "https://github.com/thenerd31", "followers_url": "https://api.github.com/users/thenerd31/followers", "following_url": "https://api.github.com/users/thenerd31/following{/other_user}", "gists_url": "https://api.github.com/users/thenerd31/gists{/gist_id}", "starred_url": "https://api.github.com/users/thenerd31/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thenerd31/subscriptions", "organizations_url": "https://api.github.com/users/thenerd31/orgs", "repos_url": "https://api.github.com/users/thenerd31/repos", "events_url": "https://api.github.com/users/thenerd31/events{/privacy}", "received_events_url": "https://api.github.com/users/thenerd31/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
2
"2022-07-19T17:47:37"
"2022-07-20T06:46:57"
"2022-07-20T06:46:02"
NONE
null
![image](https://user-images.githubusercontent.com/53668030/179815591-d75fa7d3-3122-485f-a852-b06a68909066.png)

My dataset is public on the Hugging Face Hub, but when I try to load it using the `load_dataset` command, it shows that it is downloading the files and then throws a ValueError. When I went to my directory to see if the files were downloaded, the folder was blank.

Here is the error below:

```
ValueError                                Traceback (most recent call last)
Input In [8], in <cell line: 3>()
      1 from datasets import load_dataset
----> 3 dataset = load_dataset("TheNoob3131/mosquito-data", split="train")

File ~\Anaconda3\lib\site-packages\datasets\load.py:1679, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, **config_kwargs)
   1676 try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES
   1678 # Download and prepare data
-> 1679 builder_instance.download_and_prepare(
   1680     download_config=download_config,
   1681     download_mode=download_mode,
   1682     ignore_verifications=ignore_verifications,
   1683     try_from_hf_gcs=try_from_hf_gcs,
   1684     use_auth_token=use_auth_token,
   1685 )
   1687 # Build dataset for splits
   1688 keep_in_memory = (
   1689     keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
   1690 )
```

Is the dataset in the wrong format, or is there some security permission that I should enable?
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4719/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4719/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4717
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4717/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4717/comments
https://api.github.com/repos/huggingface/datasets/issues/4717/events
https://github.com/huggingface/datasets/issues/4717
1,309,512,483
I_kwDODunzps5ODZMj
4,717
Dataset Viewer issue for LawalAfeez/englishreview-ds-mini
{ "login": "lawalAfeez820", "id": 69974956, "node_id": "MDQ6VXNlcjY5OTc0OTU2", "avatar_url": "https://avatars.githubusercontent.com/u/69974956?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lawalAfeez820", "html_url": "https://github.com/lawalAfeez820", "followers_url": "https://api.github.com/users/lawalAfeez820/followers", "following_url": "https://api.github.com/users/lawalAfeez820/following{/other_user}", "gists_url": "https://api.github.com/users/lawalAfeez820/gists{/gist_id}", "starred_url": "https://api.github.com/users/lawalAfeez820/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lawalAfeez820/subscriptions", "organizations_url": "https://api.github.com/users/lawalAfeez820/orgs", "repos_url": "https://api.github.com/users/lawalAfeez820/repos", "events_url": "https://api.github.com/users/lawalAfeez820/events{/privacy}", "received_events_url": "https://api.github.com/users/lawalAfeez820/received_events", "type": "User", "site_admin": false }
[ { "id": 3470211881, "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer", "name": "dataset-viewer", "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co" } ]
closed
false
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[ { "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false } ]
null
1
"2022-07-19T13:19:39"
"2022-07-20T08:32:57"
"2022-07-20T08:32:57"
NONE
null
### Link
_No response_

### Description
Unable to view the split data.

### Owner
_No response_
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4717/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4717/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4711
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4711/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4711/comments
https://api.github.com/repos/huggingface/datasets/issues/4711/events
https://github.com/huggingface/datasets/issues/4711
1,309,138,570
I_kwDODunzps5OB96K
4,711
Document how to create a dataset loading script for audio/vision
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892861, "node_id": "MDU6TGFiZWwxOTM1ODkyODYx", "url": "https://api.github.com/repos/huggingface/datasets/labels/documentation", "name": "documentation", "color": "0075ca", "default": true, "description": "Improvements or additions to documentation" } ]
open
false
null
[]
null
0
"2022-07-19T08:03:40"
"2022-08-01T15:08:11"
null
MEMBER
null
Currently, in our docs for Audio/Vision/Text, we explain how to:
- Load data
- Process data

However, we only explain how to *Create a dataset loading script* for text data. I think it would be useful to add the same for Audio/Vision, as these have some specificities that differ from text.

See, for example:
- #4697
- and the comment there: https://github.com/huggingface/datasets/issues/4697#issuecomment-1191502492

CC: @stevhliu
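For context, a hedged sketch of the audio-specific pieces such a guide would need to cover; every name, URL and field below is illustrative:

```python
import datasets


class MyAudioDataset(datasets.GeneratorBasedBuilder):
    """Hypothetical skeleton of an audio loading script."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    # Declaring Audio() is the audio-specific step: it makes
                    # the column decodable and streamable.
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "transcription": datasets.Value("string"),
                }
            )
        )

    def _split_generators(self, dl_manager):
        archive = dl_manager.download("https://example.org/audio.tar.gz")  # assumed URL
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": dl_manager.iter_archive(archive)},
            )
        ]

    def _generate_examples(self, files):
        # iter_archive yields (path, file-like) pairs without seeking,
        # which keeps the script streaming-friendly.
        for key, (path, f) in enumerate(files):
            if path.endswith(".wav"):
                yield key, {
                    "audio": {"path": path, "bytes": f.read()},
                    "transcription": "",  # placeholder
                }
```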
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4711/reactions", "total_count": 4, "+1": 4, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4711/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4709
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4709/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4709/comments
https://api.github.com/repos/huggingface/datasets/issues/4709/events
https://github.com/huggingface/datasets/issues/4709
1,308,633,093
I_kwDODunzps5OACgF
4,709
WMT21 & WMT22
{ "login": "Muennighoff", "id": 62820084, "node_id": "MDQ6VXNlcjYyODIwMDg0", "avatar_url": "https://avatars.githubusercontent.com/u/62820084?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Muennighoff", "html_url": "https://github.com/Muennighoff", "followers_url": "https://api.github.com/users/Muennighoff/followers", "following_url": "https://api.github.com/users/Muennighoff/following{/other_user}", "gists_url": "https://api.github.com/users/Muennighoff/gists{/gist_id}", "starred_url": "https://api.github.com/users/Muennighoff/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Muennighoff/subscriptions", "organizations_url": "https://api.github.com/users/Muennighoff/orgs", "repos_url": "https://api.github.com/users/Muennighoff/repos", "events_url": "https://api.github.com/users/Muennighoff/events{/privacy}", "received_events_url": "https://api.github.com/users/Muennighoff/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892877, "node_id": "MDU6TGFiZWwxOTM1ODkyODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue", "name": "good first issue", "color": "7057ff", "default": true, "description": "Good for newcomers" }, { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" } ]
open
false
null
[]
null
4
"2022-07-18T21:05:33"
"2022-11-23T12:28:35"
null
CONTRIBUTOR
null
## Adding a Dataset
- **Name:** WMT21 & WMT22
- **Description:** We are going to have three tracks: two small tasks and a large task. The small tracks evaluate translation between fairly related languages and English (all pairs). The large track uses 101 languages.
- **Paper:** /
- **Data:** https://statmt.org/wmt21/large-scale-multilingual-translation-task.html https://statmt.org/wmt22/large-scale-multilingual-translation-task.html
- **Motivation:** Many more languages than previous WMT versions; could be very high impact.

Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/main/ADD_NEW_DATASET.md).

I could also tackle this. I saw that the existing logic for the WMT datasets is a bit complex (datasets are stored on the wmt account & retrieved in separate wmt datasets, afaict). How long do you think it would take me? @lhoestq
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4709/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4709/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4707
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4707/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4707/comments
https://api.github.com/repos/huggingface/datasets/issues/4707/events
https://github.com/huggingface/datasets/issues/4707
1,308,251,405
I_kwDODunzps5N-lUN
4,707
Dataset Viewer issue for TheNoob3131/mosquito-data
{ "login": "thenerd31", "id": 53668030, "node_id": "MDQ6VXNlcjUzNjY4MDMw", "avatar_url": "https://avatars.githubusercontent.com/u/53668030?v=4", "gravatar_id": "", "url": "https://api.github.com/users/thenerd31", "html_url": "https://github.com/thenerd31", "followers_url": "https://api.github.com/users/thenerd31/followers", "following_url": "https://api.github.com/users/thenerd31/following{/other_user}", "gists_url": "https://api.github.com/users/thenerd31/gists{/gist_id}", "starred_url": "https://api.github.com/users/thenerd31/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thenerd31/subscriptions", "organizations_url": "https://api.github.com/users/thenerd31/orgs", "repos_url": "https://api.github.com/users/thenerd31/repos", "events_url": "https://api.github.com/users/thenerd31/events{/privacy}", "received_events_url": "https://api.github.com/users/thenerd31/received_events", "type": "User", "site_admin": false }
[ { "id": 3470211881, "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer", "name": "dataset-viewer", "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co" } ]
closed
false
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[ { "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false } ]
null
6
"2022-07-18T17:07:19"
"2022-07-18T19:44:46"
"2022-07-18T17:15:50"
NONE
null
### Link
_No response_

### Description
Getting this error when trying to view the dataset preview:

```
Message: 401, message='Unauthorized', url=URL('https://huggingface.co/datasets/TheNoob3131/mosquito-data/resolve/8aceebd6c4a359d216d10ef020868bd9e8c986dd/0_Africa_train.csv')
```

### Owner
_No response_
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4707/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4707/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4702
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4702/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4702/comments
https://api.github.com/repos/huggingface/datasets/issues/4702/events
https://github.com/huggingface/datasets/issues/4702
1,307,793,811
I_kwDODunzps5N81mT
4,702
Domain specific dataset discovery on the Hugging Face hub
{ "login": "davanstrien", "id": 8995957, "node_id": "MDQ6VXNlcjg5OTU5NTc=", "avatar_url": "https://avatars.githubusercontent.com/u/8995957?v=4", "gravatar_id": "", "url": "https://api.github.com/users/davanstrien", "html_url": "https://github.com/davanstrien", "followers_url": "https://api.github.com/users/davanstrien/followers", "following_url": "https://api.github.com/users/davanstrien/following{/other_user}", "gists_url": "https://api.github.com/users/davanstrien/gists{/gist_id}", "starred_url": "https://api.github.com/users/davanstrien/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/davanstrien/subscriptions", "organizations_url": "https://api.github.com/users/davanstrien/orgs", "repos_url": "https://api.github.com/users/davanstrien/repos", "events_url": "https://api.github.com/users/davanstrien/events{/privacy}", "received_events_url": "https://api.github.com/users/davanstrien/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
9
"2022-07-18T11:14:03"
"2022-07-19T15:18:11"
null
MEMBER
null
**Is your feature request related to a problem? Please describe.** ## The problem The datasets hub currently has `8,239` datasets. These datasets span a wide range of different modalities and tasks (currently with a bias towards textual data). There are various ways of identifying datasets that may be relevant for a particular use case: - searching - various filters Currently, however, there isn't an easy way to identify datasets belonging to a specific domain. For example, I want to browse machine learning datasets related to 'social science' or 'climate change research'. The ability to identify datasets relating to a specific domain has come up in discussions around the [BigLAM](https://github.com/bigscience-workshop/lam/) datasets hackathon https://github.com/bigscience-workshop/lam/discussions/31#discussioncomment-3123610. As part of the hackathon, we're currently collecting datasets related to Libraries, Archives and Museums and making them available via the hub. We currently do this under a Hugging Face organization (https://huggingface.co/biglam). However, going forward, I can see some of these datasets being migrated to sit under an organization that is the custodian of the dataset (for example, a national library the data was originally from). At this point, it becomes more difficult to quickly identify datasets from this domain without relying on search. This is also related to some existing issues on GitHub related to metadata on the hub: - https://github.com/huggingface/datasets/issues/3625 - https://github.com/huggingface/datasets/issues/3877 **Describe the solution you'd like** ### Some possible solutions that may help with this: #### Enable domain tags (from a controlled vocabulary) - This would add a metadata field to the YAML for the domain a dataset relates to - Advantages: - the list is controlled, allowing it to be more easily integrated into the datasets tagging app (https://huggingface.co/space/huggingface/datasets-tagging) - the controlled vocabulary could align with an existing controlled vocabulary - this additional metadata can be used to perform filtering by domain - Disadvantages: - choosing the best controlled vocabulary may be difficult - there are many datasets that are likely to fit only into the generic 'machine learning' domain (i.e. there is a long tail of datasets that don't belong to a more specific domain) #### Enable topic tags (user-generated) Enable 'free form' topic tags for datasets and models. This would be closer to GitHub's repository topics, which can be chosen from a controlled list (https://github.com/topics/) but can also be more user/org specific. This could also be useful for organizations managing their own models and datasets as the number they hold in their org grows. For example, they may create 'topic tags' for a specific project, so it's clearer which datasets/models are related to that project. #### Collections This solution would likely be the biggest shift and may require significant changes in the hub frontend. Collections could work in several different ways but would include: users can curate particular datasets, models, spaces, etc., into a collection. For example, they may create a collection of 'historic newspapers suitable for training language models'. These collections would not be mutually exclusive, i.e. a dataset can belong to zero, one or many collections. Collections can also potentially be nested under other collections. 
This is fairly common in other data repositories; for example, the following collections: <img width="293" alt="Screenshot 2022-07-18 at 11 50 44" src="https://user-images.githubusercontent.com/8995957/179496445-963ed122-5e26-4574-96e8-41081bce3e2b.png"> all belong under a higher-level collection (https://bl.iro.bl.uk/collections/353c908d-b495-4413-b047-87236d2573e3?locale=en). There are different models one could use for how these collections could be created: - only within an org - for any dataset/model - the owner of a dataset/model has to agree to be added to a collection - a collection owner can have people suggest additions to their collection - other models.... These collections could be thematic, related to particular training approaches, could curate models with particular inference properties, etc. Whilst some of these features may duplicate current or future tag filters on the hub, they offer the advantage of being flexible and not having to predict what users will want to do upfront. There is also potential for automating the creation of these collections based on existing metadata (a rough sketch of this idea follows below). For example, one could collect the models trained on the datasets in a collection: if we had a collection of 'historic newspapers suitable for training language models' that contained 30 datasets, we could create another collection, 'historic newspaper language models', containing any model on the hub whose metadata says it used one or more of those 30 datasets. There is also the option of exploring ML approaches to suggest models/datasets that may be relevant to a particular collection. This approach is likely to be quite difficult to implement well and would require significant thought. There is also likely to be a benefit in doing quite a bit of upfront work in curating useful collections to demonstrate their benefits. **Describe alternatives you've considered** It is possible to collate this information externally, i.e. one could link back to the relevant models/datasets from an external platform. **Additional context** I'm cc'ing others involved in the BigLAM hackathon who may also have thoughts @cakiki @clancyoftheoverflow @albertvillanova
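As a thought experiment, here is a minimal sketch of the automated-collection idea, assuming the `huggingface_hub` client's `list_models`/`ModelFilter` API; the dataset id is hypothetical and attribute/filter names may vary across `huggingface_hub` versions.

```python
# Hedged sketch: approximate an auto-generated collection by gathering every
# model whose metadata declares training on one of the collection's datasets.
from huggingface_hub import HfApi, ModelFilter

api = HfApi()
newspaper_datasets = ["biglam/example_newspaper_dataset"]  # hypothetical collection members

models = []
for ds_id in newspaper_datasets:
    # ModelFilter(trained_dataset=...) matches models whose cards list this dataset
    models.extend(api.list_models(filter=ModelFilter(trained_dataset=ds_id)))
print(f"collection 'historic newspaper language models' would hold {len(models)} models")
```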
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4702/reactions", "total_count": 2, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4702/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4697
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4697/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4697/comments
https://api.github.com/repos/huggingface/datasets/issues/4697/events
https://github.com/huggingface/datasets/issues/4697
1,307,332,253
I_kwDODunzps5N7E6d
4,697
Trouble with streaming frgfm/imagenette vision dataset with TAR archive
{ "login": "frgfm", "id": 26927750, "node_id": "MDQ6VXNlcjI2OTI3NzUw", "avatar_url": "https://avatars.githubusercontent.com/u/26927750?v=4", "gravatar_id": "", "url": "https://api.github.com/users/frgfm", "html_url": "https://github.com/frgfm", "followers_url": "https://api.github.com/users/frgfm/followers", "following_url": "https://api.github.com/users/frgfm/following{/other_user}", "gists_url": "https://api.github.com/users/frgfm/gists{/gist_id}", "starred_url": "https://api.github.com/users/frgfm/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/frgfm/subscriptions", "organizations_url": "https://api.github.com/users/frgfm/orgs", "repos_url": "https://api.github.com/users/frgfm/repos", "events_url": "https://api.github.com/users/frgfm/events{/privacy}", "received_events_url": "https://api.github.com/users/frgfm/received_events", "type": "User", "site_admin": false }
[ { "id": 3287858981, "node_id": "MDU6TGFiZWwzMjg3ODU4OTgx", "url": "https://api.github.com/repos/huggingface/datasets/labels/streaming", "name": "streaming", "color": "fef2c0", "default": false, "description": "" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
5
"2022-07-18T02:51:09"
"2022-08-01T15:10:57"
"2022-08-01T15:10:57"
NONE
null
### Link https://huggingface.co/datasets/frgfm/imagenette ### Description Hello there :wave: Thanks for the amazing work you've done with HF Datasets! I've just started playing with it, and managed to upload my first dataset. But for the second one, I'm having trouble with the preview since there is some archive extraction involved :sweat_smile: Basically, I get a: ``` Status code: 400 Exception: NotImplementedError Message: Extraction protocol for TAR archives like 'https://s3.amazonaws.com/fast-ai-imageclas/imagenette2.tgz' is not implemented in streaming mode. Please use `dl_manager.iter_archive` instead. ``` I've tried several things and checked this issue https://github.com/huggingface/datasets/issues/4181 as well, but no luck so far! Could you point me in the right direction please? :pray: ### Owner Yes
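For reference, a minimal sketch of the pattern the error message points to, assuming a `GeneratorBasedBuilder`-style script (the class name, features, and file-matching logic are hypothetical, not the actual `frgfm/imagenette` script):

```python
# Hedged sketch: stream a TAR archive with `dl_manager.iter_archive`
# instead of `download_and_extract`, which is unsupported in streaming mode.
import datasets

_URL = "https://s3.amazonaws.com/fast-ai-imageclas/imagenette2.tgz"

class Imagenette(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {"image": datasets.Image(), "path": datasets.Value("string")}
            )
        )

    def _split_generators(self, dl_manager):
        # download only (no extraction), so this also works when streaming
        archive = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": dl_manager.iter_archive(archive)},
            )
        ]

    def _generate_examples(self, files):
        # `iter_archive` yields (path_inside_archive, file_object) pairs
        for idx, (path, f) in enumerate(files):
            if path.endswith((".JPEG", ".jpeg", ".jpg")):
                yield idx, {"image": {"path": path, "bytes": f.read()}, "path": path}
```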
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4697/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4697/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4696
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4696/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4696/comments
https://api.github.com/repos/huggingface/datasets/issues/4696/events
https://github.com/huggingface/datasets/issues/4696
1,307,183,099
I_kwDODunzps5N6gf7
4,696
Cannot load LinCE dataset
{ "login": "finiteautomata", "id": 167943, "node_id": "MDQ6VXNlcjE2Nzk0Mw==", "avatar_url": "https://avatars.githubusercontent.com/u/167943?v=4", "gravatar_id": "", "url": "https://api.github.com/users/finiteautomata", "html_url": "https://github.com/finiteautomata", "followers_url": "https://api.github.com/users/finiteautomata/followers", "following_url": "https://api.github.com/users/finiteautomata/following{/other_user}", "gists_url": "https://api.github.com/users/finiteautomata/gists{/gist_id}", "starred_url": "https://api.github.com/users/finiteautomata/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/finiteautomata/subscriptions", "organizations_url": "https://api.github.com/users/finiteautomata/orgs", "repos_url": "https://api.github.com/users/finiteautomata/repos", "events_url": "https://api.github.com/users/finiteautomata/events{/privacy}", "received_events_url": "https://api.github.com/users/finiteautomata/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
2
"2022-07-17T19:01:54"
"2022-07-18T09:20:40"
"2022-07-18T07:24:22"
NONE
null
## Describe the bug Cannot load LinCE dataset due to a connection error ## Steps to reproduce the bug ```python from datasets import load_dataset dataset = load_dataset("lince", "ner_spaeng") ``` A notebook with this code and corresponding error can be found at https://colab.research.google.com/drive/1pgX3bNB9amuUwAVfPFm-XuMV5fEg-cD2 ## Expected results It should load the dataset ## Actual results ```python --------------------------------------------------------------------------- ConnectionError Traceback (most recent call last) <ipython-input-2-fc551ddcebef> in <module>() 1 from datasets import load_dataset 2 ----> 3 dataset = load_dataset("lince", "ner_spaeng") 10 frames /usr/local/lib/python3.7/dist-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, **config_kwargs) 1682 ignore_verifications=ignore_verifications, 1683 try_from_hf_gcs=try_from_hf_gcs, -> 1684 use_auth_token=use_auth_token, 1685 ) 1686 /usr/local/lib/python3.7/dist-packages/datasets/builder.py in download_and_prepare(self, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, **download_and_prepare_kwargs) 703 if not downloaded_from_gcs: 704 self._download_and_prepare( --> 705 dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs 706 ) 707 # Sync info /usr/local/lib/python3.7/dist-packages/datasets/builder.py in _download_and_prepare(self, dl_manager, verify_infos) 1219 1220 def _download_and_prepare(self, dl_manager, verify_infos): -> 1221 super()._download_and_prepare(dl_manager, verify_infos, check_duplicate_keys=verify_infos) 1222 1223 def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable: /usr/local/lib/python3.7/dist-packages/datasets/builder.py in _download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs) 769 split_dict = SplitDict(dataset_name=self.name) 770 split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs) --> 771 split_generators = self._split_generators(dl_manager, **split_generators_kwargs) 772 773 # Checksums verification /root/.cache/huggingface/modules/datasets_modules/datasets/lince/10d41747f55f0849fa84ac579ea1acfa7df49aa2015b60426bc459c111b3d589/lince.py in _split_generators(self, dl_manager) 481 def _split_generators(self, dl_manager): 482 """Returns SplitGenerators.""" --> 483 lince_dir = dl_manager.download_and_extract(f"{_LINCE_URL}/{self.config.name}.zip") 484 data_dir = os.path.join(lince_dir, self.config.data_dir) 485 return [ /usr/local/lib/python3.7/dist-packages/datasets/download/download_manager.py in download_and_extract(self, url_or_urls) 429 extracted_path(s): `str`, extracted paths of given URL(s). 
430 """ --> 431 return self.extract(self.download(url_or_urls)) 432 433 def get_recorded_sizes_checksums(self): /usr/local/lib/python3.7/dist-packages/datasets/download/download_manager.py in download(self, url_or_urls) 313 num_proc=download_config.num_proc, 314 disable_tqdm=not is_progress_bar_enabled(), --> 315 desc="Downloading data files", 316 ) 317 duration = datetime.now() - start_time /usr/local/lib/python3.7/dist-packages/datasets/utils/py_utils.py in map_nested(function, data_struct, dict_only, map_list, map_tuple, map_numpy, num_proc, types, disable_tqdm, desc) 346 # Singleton 347 if not isinstance(data_struct, dict) and not isinstance(data_struct, types): --> 348 return function(data_struct) 349 350 disable_tqdm = disable_tqdm or not logging.is_progress_bar_enabled() /usr/local/lib/python3.7/dist-packages/datasets/download/download_manager.py in _download(self, url_or_filename, download_config) 333 # append the relative path to the base_path 334 url_or_filename = url_or_path_join(self._base_path, url_or_filename) --> 335 return cached_path(url_or_filename, download_config=download_config) 336 337 def iter_archive(self, path_or_buf: Union[str, io.BufferedReader]): /usr/local/lib/python3.7/dist-packages/datasets/utils/file_utils.py in cached_path(url_or_filename, download_config, **download_kwargs) 195 use_auth_token=download_config.use_auth_token, 196 ignore_url_params=download_config.ignore_url_params, --> 197 download_desc=download_config.download_desc, 198 ) 199 elif os.path.exists(url_or_filename): /usr/local/lib/python3.7/dist-packages/datasets/utils/file_utils.py in get_from_cache(url, cache_dir, force_download, proxies, etag_timeout, resume_download, user_agent, local_files_only, use_etag, max_retries, use_auth_token, ignore_url_params, download_desc) 531 _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") 532 if head_error is not None: --> 533 raise ConnectionError(f"Couldn't reach {url} ({repr(head_error)})") 534 elif response is not None: 535 raise ConnectionError(f"Couldn't reach {url} (error {response.status_code})") ConnectionError: Couldn't reach https://ritual.uh.edu/lince/libaccess/eyJ1c2VybmFtZSI6ICJodWdnaW5nZmFjZSBubHAiLCAidXNlcl9pZCI6IDExMSwgImVtYWlsIjogImR1bW15QGVtYWlsLmNvbSJ9/ner_spaeng.zip (ConnectTimeout(MaxRetryError("HTTPSConnectionPool(host='ritual.uh.edu', port=443): Max retries exceeded with url: /lince/libaccess/eyJ1c2VybmFtZSI6ICJodWdnaW5nZmFjZSBubHAiLCAidXNlcl9pZCI6IDExMSwgImVtYWlsIjogImR1bW15QGVtYWlsLmNvbSJ9/ner_spaeng.zip (Caused by ConnectTimeoutError(<urllib3.connection.HTTPSConnection object at 0x7feb1c45a690>, 'Connection to ritual.uh.edu timed out. (connect timeout=100)'))"))) ``` ## Environment info - `datasets` version: 2.3.2 - Platform: Linux-5.4.188+-x86_64-with-Ubuntu-18.04-bionic - Python version: 3.7.13 - PyArrow version: 6.0.1 - Pandas version: 1.3.5
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4696/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4696/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4694
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4694/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4694/comments
https://api.github.com/repos/huggingface/datasets/issues/4694/events
https://github.com/huggingface/datasets/issues/4694
1,306,958,380
I_kwDODunzps5N5pos
4,694
Distributed data parallel training for streaming datasets
{ "login": "cyk1337", "id": 13767887, "node_id": "MDQ6VXNlcjEzNzY3ODg3", "avatar_url": "https://avatars.githubusercontent.com/u/13767887?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cyk1337", "html_url": "https://github.com/cyk1337", "followers_url": "https://api.github.com/users/cyk1337/followers", "following_url": "https://api.github.com/users/cyk1337/following{/other_user}", "gists_url": "https://api.github.com/users/cyk1337/gists{/gist_id}", "starred_url": "https://api.github.com/users/cyk1337/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cyk1337/subscriptions", "organizations_url": "https://api.github.com/users/cyk1337/orgs", "repos_url": "https://api.github.com/users/cyk1337/repos", "events_url": "https://api.github.com/users/cyk1337/events{/privacy}", "received_events_url": "https://api.github.com/users/cyk1337/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
2
"2022-07-17T01:29:43"
"2022-10-24T16:22:17"
null
NONE
null
### Feature request Is there any documentation for using `load_dataset(streaming=True)` in (multi-node, multi-GPU) DDP training? ### Motivation Given a bunch of data files, they are expected to be split across the different GPUs. Is there a guide or documentation for this? ### Your contribution Does it require manually splitting the data files per worker in `DatasetBuilder._split_generator()`? What is `IterableDatasetShard` expected to do?
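For what it's worth, a hedged sketch of one way to shard a streaming dataset per process, assuming a recent `datasets` version that provides `split_dataset_by_node` (rank and world size come from the torch.distributed launcher):

```python
# Hedged sketch: give each DDP rank a disjoint shard of a streaming dataset.
import torch.distributed as dist
from datasets import load_dataset
from datasets.distributed import split_dataset_by_node

dist.init_process_group("nccl")
ds = load_dataset("c4", "en", split="train", streaming=True)
ds = split_dataset_by_node(ds, rank=dist.get_rank(), world_size=dist.get_world_size())
# each rank now iterates over its own subset of the shards/examples
for example in ds:
    ...
```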
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4694/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4694/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4692
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4692/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4692/comments
https://api.github.com/repos/huggingface/datasets/issues/4692/events
https://github.com/huggingface/datasets/issues/4692
1,306,609,680
I_kwDODunzps5N4UgQ
4,692
Unable to cast a column with `Image()` by using the `cast_column()` feature
{ "login": "skrishnan99", "id": 28833916, "node_id": "MDQ6VXNlcjI4ODMzOTE2", "avatar_url": "https://avatars.githubusercontent.com/u/28833916?v=4", "gravatar_id": "", "url": "https://api.github.com/users/skrishnan99", "html_url": "https://github.com/skrishnan99", "followers_url": "https://api.github.com/users/skrishnan99/followers", "following_url": "https://api.github.com/users/skrishnan99/following{/other_user}", "gists_url": "https://api.github.com/users/skrishnan99/gists{/gist_id}", "starred_url": "https://api.github.com/users/skrishnan99/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/skrishnan99/subscriptions", "organizations_url": "https://api.github.com/users/skrishnan99/orgs", "repos_url": "https://api.github.com/users/skrishnan99/repos", "events_url": "https://api.github.com/users/skrishnan99/events{/privacy}", "received_events_url": "https://api.github.com/users/skrishnan99/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
1
"2022-07-15T22:56:03"
"2022-07-19T13:36:24"
"2022-07-19T13:36:24"
NONE
null
## Describe the bug A clear and concise description of what the bug is. When I create a dataset, then add a column to the created dataset through the `dataset.add_column` feature and then try to cast a column of the dataset (this column contains image paths) with `Image()` by using the `cast_column()` feature, I get the following error - ``` TypeError: Couldn't cast array of type string to {'bytes': Value(dtype='binary', id=None), 'path': Value(dtype='string', id=None)} ``` When I try and cast the same column, but without doing the `add_column` in the previous step, it works as expected. ## Steps to reproduce the bug ```python from datasets import Dataset, Image data_dict = { "img_path": ["https://picsum.photos/200/300"] } dataset = Dataset.from_dict(data_dict) #NOTE Comment out this line and use cast_column and it works properly dataset = dataset.add_column("yeet", [1]) #NOTE This line fails to execute properly if `add_column` is called before dataset = dataset.cast_column("img_path", Image()) # #NOTE This is my current workaround. This seems to work fine with/without `add_column`. While # # running this, make sure to comment out the `cast_column` line # new_features = dataset.features.copy() # new_features["img_path"] = Image() # dataset = dataset.cast(new_features) print(dataset) print(dataset.features) print(dataset[0]) ``` ## Expected results A clear and concise description of the expected results. Able to successfully use `cast_column` to cast a column containing img_paths to now be Image() features after modifying the dataset using `add_column` in a previous step ## Actual results Specify the actual results or traceback. ``` Traceback (most recent call last): File "/home/surya/Desktop/hf_bug_test.py", line 14, in <module> dataset = dataset.cast_column("img_path", Image()) File "/home/surya/anaconda3/envs/snap_test/lib/python3.9/site-packages/datasets/fingerprint.py", line 458, in wrapper out = func(self, *args, **kwargs) File "/home/surya/anaconda3/envs/snap_test/lib/python3.9/site-packages/datasets/arrow_dataset.py", line 1580, in cast_column dataset._data = dataset._data.cast(dataset.features.arrow_schema) File "/home/surya/anaconda3/envs/snap_test/lib/python3.9/site-packages/datasets/table.py", line 1487, in cast new_tables.append(subtable.cast(subschema, *args, **kwargs)) File "/home/surya/anaconda3/envs/snap_test/lib/python3.9/site-packages/datasets/table.py", line 834, in cast return InMemoryTable(table_cast(self.table, *args, **kwargs)) File "/home/surya/anaconda3/envs/snap_test/lib/python3.9/site-packages/datasets/table.py", line 1897, in table_cast return cast_table_to_schema(table, schema) File "/home/surya/anaconda3/envs/snap_test/lib/python3.9/site-packages/datasets/table.py", line 1880, in cast_table_to_schema arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()] File "/home/surya/anaconda3/envs/snap_test/lib/python3.9/site-packages/datasets/table.py", line 1880, in <listcomp> arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()] File "/home/surya/anaconda3/envs/snap_test/lib/python3.9/site-packages/datasets/table.py", line 1673, in wrapper return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks]) File "/home/surya/anaconda3/envs/snap_test/lib/python3.9/site-packages/datasets/table.py", line 1673, in <listcomp> return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks]) File 
"/home/surya/anaconda3/envs/snap_test/lib/python3.9/site-packages/datasets/table.py", line 1846, in cast_array_to_feature raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{feature}") TypeError: Couldn't cast array of type string to {'bytes': Value(dtype='binary', id=None), 'path': Value(dtype='string', id=None)} ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.3.2 - Platform: Ubuntu 20.04.3 LTS - Python version: 3.9.7 - PyArrow version: 7.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4692/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4692/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4691
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4691/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4691/comments
https://api.github.com/repos/huggingface/datasets/issues/4691/events
https://github.com/huggingface/datasets/issues/4691
1,306,389,656
I_kwDODunzps5N3eyY
4,691
Dataset Viewer issue for rajistics/indian_food_images
{ "login": "rajshah4", "id": 6808012, "node_id": "MDQ6VXNlcjY4MDgwMTI=", "avatar_url": "https://avatars.githubusercontent.com/u/6808012?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rajshah4", "html_url": "https://github.com/rajshah4", "followers_url": "https://api.github.com/users/rajshah4/followers", "following_url": "https://api.github.com/users/rajshah4/following{/other_user}", "gists_url": "https://api.github.com/users/rajshah4/gists{/gist_id}", "starred_url": "https://api.github.com/users/rajshah4/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rajshah4/subscriptions", "organizations_url": "https://api.github.com/users/rajshah4/orgs", "repos_url": "https://api.github.com/users/rajshah4/repos", "events_url": "https://api.github.com/users/rajshah4/events{/privacy}", "received_events_url": "https://api.github.com/users/rajshah4/received_events", "type": "User", "site_admin": false }
[ { "id": 3470211881, "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer", "name": "dataset-viewer", "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co" } ]
closed
false
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[ { "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false } ]
null
1
"2022-07-15T19:03:15"
"2022-07-18T15:02:03"
"2022-07-18T15:02:03"
NONE
null
### Link https://huggingface.co/datasets/rajistics/indian_food_images/viewer/rajistics--indian_food_images/train ### Description I have a train/test split in my dataset <img width="410" alt="Screen Shot 2022-07-15 at 11 44 42 AM" src="https://user-images.githubusercontent.com/6808012/179293215-7b419ec3-3527-46f2-8dad-adbc5568cfa0.png"> The dataset viewer works for the test split (images of Indian food), but does not show my train split. My guess is that some corrupt image file is causing this, but I have no idea. The original dataset was pulled from here: https://www.kaggle.com/datasets/l33tc0d3r/indian-food-classification?resource=download-directory ### Owner Yes
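If it helps triage, a hedged sketch for locating corrupt files in a local copy of the split with Pillow (the directory path is hypothetical; this is not part of the viewer itself):

```python
# Hedged sketch: verify every image file under the train directory and
# print the ones Pillow cannot decode.
from pathlib import Path
from PIL import Image

for path in Path("indian_food_images/train").rglob("*"):
    if path.suffix.lower() in {".jpg", ".jpeg", ".png"}:
        try:
            with Image.open(path) as img:
                img.verify()  # raises on truncated or corrupt files
        except Exception as err:
            print(f"corrupt: {path} ({err})")
```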
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4691/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4691/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4684
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4684/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4684/comments
https://api.github.com/repos/huggingface/datasets/issues/4684/events
https://github.com/huggingface/datasets/issues/4684
1,305,554,654
I_kwDODunzps5N0S7e
4,684
How to assign new values to Dataset?
{ "login": "beyondguo", "id": 37113676, "node_id": "MDQ6VXNlcjM3MTEzNjc2", "avatar_url": "https://avatars.githubusercontent.com/u/37113676?v=4", "gravatar_id": "", "url": "https://api.github.com/users/beyondguo", "html_url": "https://github.com/beyondguo", "followers_url": "https://api.github.com/users/beyondguo/followers", "following_url": "https://api.github.com/users/beyondguo/following{/other_user}", "gists_url": "https://api.github.com/users/beyondguo/gists{/gist_id}", "starred_url": "https://api.github.com/users/beyondguo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/beyondguo/subscriptions", "organizations_url": "https://api.github.com/users/beyondguo/orgs", "repos_url": "https://api.github.com/users/beyondguo/repos", "events_url": "https://api.github.com/users/beyondguo/events{/privacy}", "received_events_url": "https://api.github.com/users/beyondguo/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
null
[]
null
1
"2022-07-15T04:17:57"
"2022-10-10T11:53:38"
"2022-10-10T11:53:38"
NONE
null
![image](https://user-images.githubusercontent.com/37113676/179149159-bbbda0c8-a661-403c-87ed-dc2b4219cd68.png) Hi, if I want to change some values of the dataset, or add new columns to it, how can I do it? For example, I want to change all the labels of the SST2 dataset to `0`: ```python from datasets import load_dataset data = load_dataset('glue','sst2') data['train']['label'] = [0]*len(data) ``` I will get the error: ``` TypeError: 'Dataset' object does not support item assignment ```
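For context, a hedged sketch of the supported pattern: `Dataset` objects are immutable, so new values are produced with `map` (or by rebuilding the column) rather than item assignment.

```python
# Hedged sketch: overwrite the `label` column via map instead of assignment.
from datasets import load_dataset

data = load_dataset("glue", "sst2")
data["train"] = data["train"].map(lambda example: {"label": 0})

# Alternatively, rebuild the column wholesale:
# data["train"] = data["train"].remove_columns("label").add_column(
#     "label", [0] * len(data["train"])
# )
```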
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4684/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4684/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4682
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4682/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4682/comments
https://api.github.com/repos/huggingface/datasets/issues/4682/events
https://github.com/huggingface/datasets/issues/4682
1,304,788,215
I_kwDODunzps5NxXz3
4,682
weird issue/bug with columns (dataset iterable/stream mode)
{ "login": "eunseojo", "id": 12104720, "node_id": "MDQ6VXNlcjEyMTA0NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/12104720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/eunseojo", "html_url": "https://github.com/eunseojo", "followers_url": "https://api.github.com/users/eunseojo/followers", "following_url": "https://api.github.com/users/eunseojo/following{/other_user}", "gists_url": "https://api.github.com/users/eunseojo/gists{/gist_id}", "starred_url": "https://api.github.com/users/eunseojo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/eunseojo/subscriptions", "organizations_url": "https://api.github.com/users/eunseojo/orgs", "repos_url": "https://api.github.com/users/eunseojo/repos", "events_url": "https://api.github.com/users/eunseojo/events{/privacy}", "received_events_url": "https://api.github.com/users/eunseojo/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
0
"2022-07-14T13:26:47"
"2022-07-14T13:26:47"
null
CONTRIBUTOR
null
I have a dataset on the Hub (CloverSearch/cc-news-mutlilingual) that has a bunch of columns, two of which are "score_title_maintext" and "score_title_description". The original files are JSONL-formatted. I was trying to iterate through it in streaming mode and grab all "score_title_description" values, but I kept getting a key-not-found error after a certain point of iteration. I found that some JSON objects in the files don't have "score_title_description". In some cases this returns `None`, and in others it raises a `KeyError`. Why is there an inconsistency here, and how can I fix it?
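A hedged workaround sketch (the split name and default config are assumptions): normalize the optional field with `map` so every streamed example carries the key, defaulting to `None` when the underlying JSON object lacked it.

```python
# Hedged sketch: make the optional JSONL field uniformly present in streaming mode.
from datasets import load_dataset

ds = load_dataset("CloverSearch/cc-news-mutlilingual", split="train", streaming=True)
ds = ds.map(lambda ex: {"score_title_description": ex.get("score_title_description")})

for example in ds:
    score = example["score_title_description"]  # always present now, possibly None
```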
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4682/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4682/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4681
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4681/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4681/comments
https://api.github.com/repos/huggingface/datasets/issues/4681/events
https://github.com/huggingface/datasets/issues/4681
1,304,617,484
I_kwDODunzps5NwuIM
4,681
IndexError when loading ImageFolder
{ "login": "johko", "id": 2843485, "node_id": "MDQ6VXNlcjI4NDM0ODU=", "avatar_url": "https://avatars.githubusercontent.com/u/2843485?v=4", "gravatar_id": "", "url": "https://api.github.com/users/johko", "html_url": "https://github.com/johko", "followers_url": "https://api.github.com/users/johko/followers", "following_url": "https://api.github.com/users/johko/following{/other_user}", "gists_url": "https://api.github.com/users/johko/gists{/gist_id}", "starred_url": "https://api.github.com/users/johko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/johko/subscriptions", "organizations_url": "https://api.github.com/users/johko/orgs", "repos_url": "https://api.github.com/users/johko/repos", "events_url": "https://api.github.com/users/johko/events{/privacy}", "received_events_url": "https://api.github.com/users/johko/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[ { "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false } ]
null
2
"2022-07-14T10:57:55"
"2022-07-25T12:37:54"
"2022-07-25T12:37:54"
NONE
null
## Describe the bug Loading an image dataset with `imagefolder` throws `IndexError: list index out of range` when the given folder contains a non-image file (like a csv). ## Steps to reproduce the bug Put a csv file in a folder with images and load it: ```python import datasets datasets.load_dataset("imagefolder", data_dir=path/to/folder) ``` ## Expected results I would expect a better error message, like `Unsupported file` or even the dataset loader just ignoring every file that is not an image in that case. ## Actual results Here is the whole traceback: ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.3.2 - Platform: Linux-5.11.0-051100-generic-x86_64-with-glibc2.27 - Python version: 3.9.9 - PyArrow version: 8.0.0 - Pandas version: 1.4.3
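Until the loader handles this more gracefully, a hedged workaround sketch (the path and glob are hypothetical): restrict `data_files` to image extensions so stray CSVs in the folder never reach the builder.

```python
# Hedged sketch: only feed *.jpg files to the imagefolder builder.
import datasets

dataset = datasets.load_dataset(
    "imagefolder",
    data_files={"train": "path/to/folder/**/*.jpg"},
)
```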
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4681/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4681/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4680
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4680/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4680/comments
https://api.github.com/repos/huggingface/datasets/issues/4680/events
https://github.com/huggingface/datasets/issues/4680
1,304,534,770
I_kwDODunzps5NwZ7y
4,680
Dataset Viewer issue for codeparrot/xlcost-text-to-code
{ "login": "loubnabnl", "id": 44069155, "node_id": "MDQ6VXNlcjQ0MDY5MTU1", "avatar_url": "https://avatars.githubusercontent.com/u/44069155?v=4", "gravatar_id": "", "url": "https://api.github.com/users/loubnabnl", "html_url": "https://github.com/loubnabnl", "followers_url": "https://api.github.com/users/loubnabnl/followers", "following_url": "https://api.github.com/users/loubnabnl/following{/other_user}", "gists_url": "https://api.github.com/users/loubnabnl/gists{/gist_id}", "starred_url": "https://api.github.com/users/loubnabnl/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/loubnabnl/subscriptions", "organizations_url": "https://api.github.com/users/loubnabnl/orgs", "repos_url": "https://api.github.com/users/loubnabnl/repos", "events_url": "https://api.github.com/users/loubnabnl/events{/privacy}", "received_events_url": "https://api.github.com/users/loubnabnl/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
5
"2022-07-14T09:45:50"
"2022-07-18T16:37:00"
"2022-07-18T16:04:36"
NONE
null
### Link https://huggingface.co/datasets/codeparrot/xlcost-text-to-code ### Description Error ``` Server Error Status code: 400 Exception: TypeError Message: 'NoneType' object is not iterable ``` Before I made a minor change in the dataset script (removing some comments), the viewer was working, though not properly: it wasn't showing the dataset subsets. The data itself can still be loaded successfully. Thanks! ### Owner Yes
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4680/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4680/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4678
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4678/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4678/comments
https://api.github.com/repos/huggingface/datasets/issues/4678/events
https://github.com/huggingface/datasets/issues/4678
1,303,741,432
I_kwDODunzps5NtYP4
4,678
Can't pass streaming dataset to dataloader after take()
{ "login": "zankner", "id": 39166683, "node_id": "MDQ6VXNlcjM5MTY2Njgz", "avatar_url": "https://avatars.githubusercontent.com/u/39166683?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zankner", "html_url": "https://github.com/zankner", "followers_url": "https://api.github.com/users/zankner/followers", "following_url": "https://api.github.com/users/zankner/following{/other_user}", "gists_url": "https://api.github.com/users/zankner/gists{/gist_id}", "starred_url": "https://api.github.com/users/zankner/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zankner/subscriptions", "organizations_url": "https://api.github.com/users/zankner/orgs", "repos_url": "https://api.github.com/users/zankner/repos", "events_url": "https://api.github.com/users/zankner/events{/privacy}", "received_events_url": "https://api.github.com/users/zankner/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
1
"2022-07-13T17:34:18"
"2022-07-14T13:07:21"
null
NONE
null
## Describe the bug I am trying to pass a streaming version of c4 to a dataloader, but it can't be passed after I call `dataset.take(n)`. Some functions such as `shuffle()` can be applied without breaking the dataloader but not take. ## Steps to reproduce the bug ```python import datasets import torch dset = datasets.load_dataset(path='c4', name='en', split="train", streaming=True) dset = dset.take(50_000) dset = dset.with_format("torch") num_workers = 8 batch_size = 512 loader = torch.utils.data.DataLoader(dataset=dset, batch_size=batch_size, num_workers=num_workers) for batch in loader: ... ``` ## Expected results No error thrown when iterating over the dataloader ## Actual results Original Traceback (most recent call last): File "/usr/local/lib/python3.9/dist-packages/torch/utils/data/_utils/worker.py", line 287, in _worker_loop data = fetcher.fetch(index) File "/usr/local/lib/python3.9/dist-packages/torch/utils/data/_utils/fetch.py", line 32, in fetch data.append(next(self.dataset_iter)) File "/root/.local/lib/python3.9/site-packages/datasets/formatting/dataset_wrappers/torch_iterable_dataset.py", line 48, in __iter__ for key, example in self._iter_shard(shard_idx): File "/root/.local/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 586, in _iter_shard yield from ex_iterable.shard_data_sources(shard_idx) File "/root/.local/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 60, in shard_data_sources raise NotImplementedError(f"{type(self)} doesn't implement shard_data_sources yet") NotImplementedError: <class 'datasets.iterable_dataset.TakeExamplesIterable'> doesn't implement shard_data_sources yet ## Environment info - `datasets` version: 2.3.2 - Platform: Linux-5.4.0-120-generic-x86_64-with-glibc2.31 - Python version: 3.9.13 - PyArrow version: 8.0.0 - Pandas version: 1.4.3
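A hedged workaround sketch while `TakeExamplesIterable` lacks `shard_data_sources`: either drop DataLoader multiprocessing, or bound the iteration yourself instead of calling `.take()` (batch size and counts mirror the repro above).

```python
# Hedged sketch: avoid .take() by slicing the loader, and use num_workers=0
# so the dataset never needs to be sharded across worker processes.
import itertools
import datasets
import torch

dset = datasets.load_dataset("c4", "en", split="train", streaming=True).with_format("torch")
loader = torch.utils.data.DataLoader(dataset=dset, batch_size=512, num_workers=0)

for batch in itertools.islice(loader, 50_000 // 512):
    ...
```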
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4678/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/huggingface/datasets/issues/4678/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4677
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4677/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4677/comments
https://api.github.com/repos/huggingface/datasets/issues/4677/events
https://github.com/huggingface/datasets/issues/4677
1,302,258,440
I_kwDODunzps5NnuMI
4,677
Random 400 Client Error when pushing dataset
{ "login": "msis", "id": 577139, "node_id": "MDQ6VXNlcjU3NzEzOQ==", "avatar_url": "https://avatars.githubusercontent.com/u/577139?v=4", "gravatar_id": "", "url": "https://api.github.com/users/msis", "html_url": "https://github.com/msis", "followers_url": "https://api.github.com/users/msis/followers", "following_url": "https://api.github.com/users/msis/following{/other_user}", "gists_url": "https://api.github.com/users/msis/gists{/gist_id}", "starred_url": "https://api.github.com/users/msis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/msis/subscriptions", "organizations_url": "https://api.github.com/users/msis/orgs", "repos_url": "https://api.github.com/users/msis/repos", "events_url": "https://api.github.com/users/msis/events{/privacy}", "received_events_url": "https://api.github.com/users/msis/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
2
"2022-07-12T15:56:44"
"2022-10-06T14:18:54"
null
NONE
null
## Describe the bug When pushing a dataset, the client errors randomly with `Bad Request for url:...`. At the next call, a new parquet file is created for each shard. The client may fail at any random shard. ## Steps to reproduce the bug ```python dataset.push_to_hub("ORG/DATASET", private=True, branch="main") ``` ## Expected results Push all the dataset to the Hub with no duplicates. If it fails, it should retry or fail, but continue from the last failed shard. ## Actual results ``` --------------------------------------------------------------------------- HTTPError Traceback (most recent call last) testing.ipynb Cell 29 in <cell line: 1>() ----> [1](testing.ipynb?line=0) dataset.push_to_hub("ORG/DATASET", private=True, branch="main") File ~/.local/lib/python3.9/site-packages/datasets/arrow_dataset.py:4297, in Dataset.push_to_hub(self, repo_id, split, private, token, branch, max_shard_size, shard_size, embed_external_files) 4291 warnings.warn( 4292 "'shard_size' was renamed to 'max_shard_size' in version 2.1.1 and will be removed in 2.4.0.", 4293 FutureWarning, 4294 ) 4295 max_shard_size = shard_size -> 4297 repo_id, split, uploaded_size, dataset_nbytes, repo_files, deleted_size = self._push_parquet_shards_to_hub( 4298 repo_id=repo_id, 4299 split=split, 4300 private=private, 4301 token=token, 4302 branch=branch, 4303 max_shard_size=max_shard_size, 4304 embed_external_files=embed_external_files, 4305 ) 4306 organization, dataset_name = repo_id.split("/") 4307 info_to_dump = self.info.copy() File ~/.local/lib/python3.9/site-packages/datasets/arrow_dataset.py:4195, in Dataset._push_parquet_shards_to_hub(self, repo_id, split, private, token, branch, max_shard_size, embed_external_files) 4193 shard.to_parquet(buffer) 4194 uploaded_size += buffer.tell() -> 4195 _retry( 4196 api.upload_file, 4197 func_kwargs=dict( 4198 path_or_fileobj=buffer.getvalue(), 4199 path_in_repo=shard_path_in_repo, 4200 repo_id=repo_id, 4201 token=token, 4202 repo_type="dataset", 4203 revision=branch, 4204 identical_ok=False, 4205 ), 4206 exceptions=HTTPError, 4207 status_codes=[504], 4208 base_wait_time=2.0, 4209 max_retries=5, 4210 max_wait_time=20.0, 4211 ) 4212 shards_path_in_repo.append(shard_path_in_repo) 4214 # Cleanup to remove unused files File ~/.local/lib/python3.9/site-packages/datasets/utils/file_utils.py:284, in _retry(func, func_args, func_kwargs, exceptions, status_codes, max_retries, base_wait_time, max_wait_time) 282 except exceptions as err: 283 if retry >= max_retries or (status_codes and err.response.status_code not in status_codes): --> 284 raise err 285 else: 286 sleep_time = min(max_wait_time, base_wait_time * 2**retry) # Exponential backoff File ~/.local/lib/python3.9/site-packages/datasets/utils/file_utils.py:281, in _retry(func, func_args, func_kwargs, exceptions, status_codes, max_retries, base_wait_time, max_wait_time) 279 while True: 280 try: --> 281 return func(*func_args, **func_kwargs) 282 except exceptions as err: 283 if retry >= max_retries or (status_codes and err.response.status_code not in status_codes): File ~/.local/lib/python3.9/site-packages/huggingface_hub/hf_api.py:1967, in HfApi.upload_file(self, path_or_fileobj, path_in_repo, repo_id, token, repo_type, revision, identical_ok, commit_message, commit_description, create_pr) 1957 commit_message = ( 1958 commit_message 1959 if commit_message is not None 1960 else f"Upload {path_in_repo} with huggingface_hub" 1961 ) 1962 operation = CommitOperationAdd( 1963 path_or_fileobj=path_or_fileobj, 1964 path_in_repo=path_in_repo, 1965 ) 
-> 1967 pr_url = self.create_commit( 1968 repo_id=repo_id, 1969 repo_type=repo_type, 1970 operations=[operation], 1971 commit_message=commit_message, 1972 commit_description=commit_description, 1973 token=token, 1974 revision=revision, 1975 create_pr=create_pr, 1976 ) 1977 if pr_url is not None: 1978 re_match = re.match(REGEX_DISCUSSION_URL, pr_url) File ~/.local/lib/python3.9/site-packages/huggingface_hub/hf_api.py:1844, in HfApi.create_commit(self, repo_id, operations, commit_message, commit_description, token, repo_type, revision, create_pr, num_threads) 1836 commit_url = f"{self.endpoint}/api/{repo_type}s/{repo_id}/commit/{revision}" 1838 commit_resp = requests.post( 1839 url=commit_url, 1840 headers={"Authorization": f"Bearer {token}"}, 1841 json=commit_payload, 1842 params={"create_pr": 1} if create_pr else None, 1843 ) -> 1844 _raise_for_status(commit_resp) 1845 return commit_resp.json().get("pullRequestUrl", None) File ~/.local/lib/python3.9/site-packages/huggingface_hub/utils/_errors.py:84, in _raise_for_status(request) 76 if request.status_code == 401: 77 # The repo was not found and the user is not Authenticated 78 raise RepositoryNotFoundError( 79 f"401 Client Error: Repository Not Found for url: {request.url}. If the" 80 " repo is private, make sure you are authenticated. (Request ID:" 81 f" {request_id})" 82 ) ---> 84 _raise_with_request_id(request) File ~/.local/lib/python3.9/site-packages/huggingface_hub/utils/_errors.py:95, in _raise_with_request_id(request) 92 if request_id is not None and len(e.args) > 0 and isinstance(e.args[0], str): 93 e.args = (e.args[0] + f" (Request ID: {request_id})",) + e.args[1:] ---> 95 raise e File ~/.local/lib/python3.9/site-packages/huggingface_hub/utils/_errors.py:90, in _raise_with_request_id(request) 88 request_id = request.headers.get("X-Request-Id") 89 try: ---> 90 request.raise_for_status() 91 except Exception as e: 92 if request_id is not None and len(e.args) > 0 and isinstance(e.args[0], str): File ~/.local/lib/python3.9/site-packages/requests/models.py:1021, in Response.raise_for_status(self) 1016 http_error_msg = ( 1017 f"{self.status_code} Server Error: {reason} for url: {self.url}" 1018 ) 1020 if http_error_msg: -> 1021 raise HTTPError(http_error_msg, response=self) HTTPError: 400 Client Error: Bad Request for url: https://huggingface.co/api/datasets/ORG/DATASET/commit/main (Request ID: a_F0IQAHJdxGKVRYyu1cF) ``` ## Environment info - `datasets` version: 2.3.2 - Platform: Linux-5.13.0-1025-aws-x86_64-with-glibc2.31 - Python version: 3.9.4 - PyArrow version: 8.0.0 - Pandas version: 1.4.3
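Pending a server-side fix, a hedged client-side retry sketch (`dataset` is the same object as in the repro; note the report above says a retried push re-creates parquet shards, so this only papers over the failure rather than resuming from the last failed shard):

```python
# Hedged sketch: retry push_to_hub on 400 responses with exponential backoff.
import time
from requests import HTTPError

for attempt in range(5):
    try:
        dataset.push_to_hub("ORG/DATASET", private=True, branch="main")
        break
    except HTTPError as err:
        status = err.response.status_code if err.response is not None else None
        if status == 400 and attempt < 4:
            time.sleep(2 ** attempt)  # back off before retrying
        else:
            raise
```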
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4677/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4677/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4676
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4676/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4676/comments
https://api.github.com/repos/huggingface/datasets/issues/4676/events
https://github.com/huggingface/datasets/issues/4676
1,302,202,028
I_kwDODunzps5Nngas
4,676
Dataset.map gets stuck on _cast_to_python_objects
{ "login": "srobertjames", "id": 662612, "node_id": "MDQ6VXNlcjY2MjYxMg==", "avatar_url": "https://avatars.githubusercontent.com/u/662612?v=4", "gravatar_id": "", "url": "https://api.github.com/users/srobertjames", "html_url": "https://github.com/srobertjames", "followers_url": "https://api.github.com/users/srobertjames/followers", "following_url": "https://api.github.com/users/srobertjames/following{/other_user}", "gists_url": "https://api.github.com/users/srobertjames/gists{/gist_id}", "starred_url": "https://api.github.com/users/srobertjames/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/srobertjames/subscriptions", "organizations_url": "https://api.github.com/users/srobertjames/orgs", "repos_url": "https://api.github.com/users/srobertjames/repos", "events_url": "https://api.github.com/users/srobertjames/events{/privacy}", "received_events_url": "https://api.github.com/users/srobertjames/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" }, { "id": 1935892877, "node_id": "MDU6TGFiZWwxOTM1ODkyODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue", "name": "good first issue", "color": "7057ff", "default": true, "description": "Good for newcomers" } ]
closed
false
{ "login": "szmoro", "id": 5697926, "node_id": "MDQ6VXNlcjU2OTc5MjY=", "avatar_url": "https://avatars.githubusercontent.com/u/5697926?v=4", "gravatar_id": "", "url": "https://api.github.com/users/szmoro", "html_url": "https://github.com/szmoro", "followers_url": "https://api.github.com/users/szmoro/followers", "following_url": "https://api.github.com/users/szmoro/following{/other_user}", "gists_url": "https://api.github.com/users/szmoro/gists{/gist_id}", "starred_url": "https://api.github.com/users/szmoro/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/szmoro/subscriptions", "organizations_url": "https://api.github.com/users/szmoro/orgs", "repos_url": "https://api.github.com/users/szmoro/repos", "events_url": "https://api.github.com/users/szmoro/events{/privacy}", "received_events_url": "https://api.github.com/users/szmoro/received_events", "type": "User", "site_admin": false }
[ { "login": "szmoro", "id": 5697926, "node_id": "MDQ6VXNlcjU2OTc5MjY=", "avatar_url": "https://avatars.githubusercontent.com/u/5697926?v=4", "gravatar_id": "", "url": "https://api.github.com/users/szmoro", "html_url": "https://github.com/szmoro", "followers_url": "https://api.github.com/users/szmoro/followers", "following_url": "https://api.github.com/users/szmoro/following{/other_user}", "gists_url": "https://api.github.com/users/szmoro/gists{/gist_id}", "starred_url": "https://api.github.com/users/szmoro/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/szmoro/subscriptions", "organizations_url": "https://api.github.com/users/szmoro/orgs", "repos_url": "https://api.github.com/users/szmoro/repos", "events_url": "https://api.github.com/users/szmoro/events{/privacy}", "received_events_url": "https://api.github.com/users/szmoro/received_events", "type": "User", "site_admin": false } ]
null
9
"2022-07-12T15:09:58"
"2022-10-03T13:01:04"
"2022-10-03T13:01:03"
NONE
null
## Describe the bug `Dataset.map`, when fed a Hugging Face tokenizer as its map function, can sometimes spend huge amounts of time doing casts. A minimal example follows. Not all usages suffer from this. For example, I profiled the preprocessor at https://github.com/huggingface/notebooks/blob/main/examples/question_answering.ipynb , and it did _not_ have this problem. However, I'm at a loss to figure out how it avoids it, as the example below is simple and minimal and still has this problem. Where it occurs, this casting makes `Dataset.map` run approximately 7x slower than code that does not trigger it. This may be related to https://github.com/huggingface/datasets/issues/1046 . However, the tokenizer is _not_ set to return Tensors. ## Steps to reproduce the bug A minimal, self-contained example to reproduce is below: ```python import transformers from transformers import AutoTokenizer from datasets import load_dataset import torch import cProfile pretrained = 'distilbert-base-uncased' tokenizer = AutoTokenizer.from_pretrained(pretrained) squad = load_dataset('squad') squad_train = squad['train'] squad_tiny = squad_train.select(range(5000)) assert isinstance(tokenizer, transformers.PreTrainedTokenizerFast) def tokenize(ds): tokens = tokenizer(text=ds['question'], text_pair=ds['context'], add_special_tokens=True, padding='max_length', truncation='only_second', max_length=160, stride=32, return_overflowing_tokens=True, return_offsets_mapping=True, ) return tokens cmd = 'squad_tiny.map(tokenize, batched=True, remove_columns=squad_tiny.column_names)' cProfile.run(cmd, sort='tottime') ``` ## Actual results The code works, but takes 10-25 seconds per batch (about 7x slower than non-casting code), with the following profile. Note that `_cast_to_python_objects` is the culprit.
``` 63524075 function calls (58206482 primitive calls) in 121.836 seconds Ordered by: internal time ncalls tottime percall cumtime percall filename:lineno(function) 5274034/40 68.751 0.000 111.060 2.776 features.py:262(_cast_to_python_objects) 42223832 24.077 0.000 33.310 0.000 {built-in method builtins.isinstance} 16338/20 5.121 0.000 111.053 5.553 features.py:361(<listcomp>) 5274135 4.747 0.000 4.749 0.000 {built-in method _abc._abc_instancecheck} 80/40 4.731 0.059 116.292 2.907 {pyarrow.lib.array} 5274135 4.485 0.000 9.234 0.000 abc.py:96(__instancecheck__) 2661564/2645196 2.959 0.000 4.298 0.000 features.py:1081(_check_non_null_non_empty_recursive) 5 2.786 0.557 2.786 0.557 {method 'encode_batch' of 'tokenizers.Tokenizer' objects} 2668052 0.930 0.000 0.930 0.000 {built-in method builtins.len} 5000 0.930 0.000 0.938 0.000 tokenization_utils_fast.py:187(_convert_encoding) 5 0.750 0.150 0.808 0.162 {method 'to_pydict' of 'pyarrow.lib.Table' objects} 1 0.444 0.444 121.749 121.749 arrow_dataset.py:2501(_map_single) 40 0.375 0.009 116.291 2.907 arrow_writer.py:151(__arrow_array__) 10 0.066 0.007 0.066 0.007 {method 'write_batch' of 'pyarrow.lib._CRecordBatchWriter' objects} 1 0.060 0.060 121.835 121.835 fingerprint.py:409(wrapper) 11387/5715 0.049 0.000 0.175 0.000 {built-in method builtins.getattr} 36 0.049 0.001 0.049 0.001 {pyarrow._compute.call_function} 15000 0.040 0.000 0.040 0.000 _collections_abc.py:719(__iter__) 3 0.023 0.008 0.023 0.008 {built-in method _imp.create_dynamic} 77 0.020 0.000 0.020 0.000 {built-in method builtins.dir} 37 0.019 0.001 0.019 0.001 socket.py:543(send) 15 0.017 0.001 0.017 0.001 tokenization_utils_fast.py:460(<listcomp>) 432/421 0.015 0.000 0.024 0.000 traitlets.py:1388(_notify_observers) 5000 0.015 0.000 0.018 0.000 _collections_abc.py:672(keys) 51 0.014 0.000 0.042 0.001 traitlets.py:276(getmembers) 5 0.014 0.003 3.775 0.755 tokenization_utils_fast.py:392(_batch_encode_plus) 3/1 0.014 0.005 0.035 0.035 {built-in method _imp.exec_dynamic} 5 0.012 0.002 0.950 0.190 tokenization_utils_fast.py:438(<listcomp>) 31626 0.012 0.000 0.012 0.000 {method 'append' of 'list' objects} 1532/1001 0.011 0.000 0.189 0.000 traitlets.py:643(get) 5 0.009 0.002 3.796 0.759 arrow_dataset.py:2631(apply_function_on_filtered_inputs) 51 0.009 0.000 0.062 0.001 traitlets.py:1766(traits) 5 0.008 0.002 3.784 0.757 tokenization_utils_base.py:2632(batch_encode_plus) 368 0.007 0.000 0.044 0.000 traitlets.py:1715(_get_trait_default_generator) 26 0.007 0.000 0.022 0.001 traitlets.py:1186(setup_instance) 51 0.006 0.000 0.010 0.000 traitlets.py:1781(<listcomp>) 80/32 0.006 0.000 0.052 0.002 table.py:1758(cast_array_to_feature) 684 0.006 0.000 0.007 0.000 {method 'items' of 'dict' objects} 4344/1794 0.006 0.000 0.192 0.000 traitlets.py:675(__get__) ... ``` ## Environment info I observed this on both Google colab and my local workstation: ### Google colab - `datasets` version: 2.3.2 - Platform: Linux-5.4.188+-x86_64-with-Ubuntu-18.04-bionic - Python version: 3.7.13 - PyArrow version: 6.0.1 - Pandas version: 1.3.5 ### Local - `datasets` version: 2.3.2 - Platform: Windows-7-6.1.7601-SP1 - Python version: 3.8.10 - PyArrow version: 8.0.0 - Pandas version: 1.4.3
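To make the cost concrete, here is a minimal sketch (mine, not from the original report; sizes and timings are illustrative) of why falling back to per-element Python objects is expensive: pyarrow ingests a numpy buffer almost for free, while building the same array from Python objects (which is essentially what `_cast_to_python_objects` forces `pyarrow.lib.array` to do) walks every element.

```python
# Minimal sketch (assumption: illustrative only, not from the issue): compare
# pyarrow building an array from boxed Python ints vs. a contiguous numpy buffer.
import time

import numpy as np
import pyarrow as pa

n = 1_000_000
py_values = list(range(n))   # one Python object per element
np_values = np.arange(n)     # a single contiguous buffer

t0 = time.perf_counter()
pa.array(py_values)          # converts one Python object at a time
t1 = time.perf_counter()
pa.array(np_values)          # near zero-copy conversion
t2 = time.perf_counter()

print(f"from list:  {t1 - t0:.4f}s")
print(f"from numpy: {t2 - t1:.4f}s")
```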
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4676/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4676/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4675
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4675/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4675/comments
https://api.github.com/repos/huggingface/datasets/issues/4675/events
https://github.com/huggingface/datasets/issues/4675
1,302,193,649
I_kwDODunzps5NneXx
4,675
Unable to use dataset with PyTorch dataloader
{ "login": "BlueskyFR", "id": 25421460, "node_id": "MDQ6VXNlcjI1NDIxNDYw", "avatar_url": "https://avatars.githubusercontent.com/u/25421460?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BlueskyFR", "html_url": "https://github.com/BlueskyFR", "followers_url": "https://api.github.com/users/BlueskyFR/followers", "following_url": "https://api.github.com/users/BlueskyFR/following{/other_user}", "gists_url": "https://api.github.com/users/BlueskyFR/gists{/gist_id}", "starred_url": "https://api.github.com/users/BlueskyFR/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BlueskyFR/subscriptions", "organizations_url": "https://api.github.com/users/BlueskyFR/orgs", "repos_url": "https://api.github.com/users/BlueskyFR/repos", "events_url": "https://api.github.com/users/BlueskyFR/events{/privacy}", "received_events_url": "https://api.github.com/users/BlueskyFR/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
1
"2022-07-12T15:04:04"
"2022-07-14T14:17:46"
null
NONE
null
## Describe the bug When using `.with_format("torch")`, an arrow table is returned and I am unable to use it by passing it to a PyTorch DataLoader: please see the code below. ## Steps to reproduce the bug ```python from datasets import load_dataset from torch.utils.data import DataLoader ds = load_dataset( "para_crawl", name="enfr", cache_dir="/tmp/test/", split="train", keep_in_memory=True, ) dataloader = DataLoader(ds.with_format("torch"), num_workers=32) print(next(iter(dataloader))) ``` Is there something I am doing wrong? The documentation does not say much about the behavior of `.with_format()` so I feel like I am a bit stuck here :-/ Thanks in advance for your help! ## Expected results The code should run with no error ## Actual results ``` AttributeError: 'str' object has no attribute 'dtype' ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.3.2 - Platform: Linux-4.18.0-348.el8.x86_64-x86_64-with-glibc2.28 - Python version: 3.10.4 - PyArrow version: 8.0.0 - Pandas version: 1.4.3
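A possible workaround, sketched under the assumption (not confirmed in the issue) that the failure comes from `with_format("torch")` trying to tensorize `para_crawl`'s string-valued `translation` column: derive a numeric column and restrict the torch format to it, so the strings stay out of the tensor conversion.

```python
# Hedged sketch: `en_len` is a hypothetical numeric feature added purely for
# illustration; in practice you would tokenize the text instead.
from datasets import load_dataset
from torch.utils.data import DataLoader

ds = load_dataset("para_crawl", name="enfr", split="train")

def add_lengths(batch):
    return {"en_len": [len(pair["en"]) for pair in batch["translation"]]}

ds = ds.map(add_lengths, batched=True)
ds = ds.with_format("torch", columns=["en_len"])  # only numeric columns

dataloader = DataLoader(ds, batch_size=8)
print(next(iter(dataloader)))
```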
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4675/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4675/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4674
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4674/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4674/comments
https://api.github.com/repos/huggingface/datasets/issues/4674/events
https://github.com/huggingface/datasets/issues/4674
1,301,294,844
I_kwDODunzps5NkC78
4,674
Issue loading datasets -- pyarrow.lib has no attribute
{ "login": "margotwagner", "id": 39107794, "node_id": "MDQ6VXNlcjM5MTA3Nzk0", "avatar_url": "https://avatars.githubusercontent.com/u/39107794?v=4", "gravatar_id": "", "url": "https://api.github.com/users/margotwagner", "html_url": "https://github.com/margotwagner", "followers_url": "https://api.github.com/users/margotwagner/followers", "following_url": "https://api.github.com/users/margotwagner/following{/other_user}", "gists_url": "https://api.github.com/users/margotwagner/gists{/gist_id}", "starred_url": "https://api.github.com/users/margotwagner/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/margotwagner/subscriptions", "organizations_url": "https://api.github.com/users/margotwagner/orgs", "repos_url": "https://api.github.com/users/margotwagner/repos", "events_url": "https://api.github.com/users/margotwagner/events{/privacy}", "received_events_url": "https://api.github.com/users/margotwagner/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
1
"2022-07-11T22:10:44"
"2022-07-12T04:54:31"
null
NONE
null
## Describe the bug I am trying to load sentiment analysis datasets from the Hugging Face Hub, but for any dataset I try to load via `load_dataset`, I get the same error: `AttributeError: module 'pyarrow.lib' has no attribute 'IpcReadOptions'` ## Steps to reproduce the bug ```python dataset = load_dataset("glue", "cola") ``` ## Expected results The dataset downloads and loads without issue. ## Actual results `AttributeError: module 'pyarrow.lib' has no attribute 'IpcReadOptions'` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.3.2 - Platform: macOS-10.15.7-x86_64-i386-64bit - Python version: 3.8.5 - PyArrow version: 8.0.0 - Pandas version: 1.1.0
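An `AttributeError` on `pyarrow.lib` usually points to a stale or shadowed PyArrow install rather than a `datasets` bug; that is an assumption here, not a confirmed diagnosis, but it is cheap to check:

```python
# Hedged diagnostic sketch: verify which pyarrow actually gets imported and
# whether it exposes the attribute that datasets expects.
import pyarrow

print(pyarrow.__version__)   # should match what `pip show pyarrow` reports
print(pyarrow.__file__)      # catches duplicate installs shadowing each other
print(hasattr(pyarrow.lib, "IpcReadOptions"))

# If this prints False despite a recent version, a clean reinstall often helps:
#   pip install --force-reinstall pyarrow
```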
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4674/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4674/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4673
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4673/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4673/comments
https://api.github.com/repos/huggingface/datasets/issues/4673/events
https://github.com/huggingface/datasets/issues/4673
1,301,010,331
I_kwDODunzps5Ni9eb
4,673
load_datasets on csv returns everything as a string
{ "login": "courtneysprouse", "id": 25102613, "node_id": "MDQ6VXNlcjI1MTAyNjEz", "avatar_url": "https://avatars.githubusercontent.com/u/25102613?v=4", "gravatar_id": "", "url": "https://api.github.com/users/courtneysprouse", "html_url": "https://github.com/courtneysprouse", "followers_url": "https://api.github.com/users/courtneysprouse/followers", "following_url": "https://api.github.com/users/courtneysprouse/following{/other_user}", "gists_url": "https://api.github.com/users/courtneysprouse/gists{/gist_id}", "starred_url": "https://api.github.com/users/courtneysprouse/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/courtneysprouse/subscriptions", "organizations_url": "https://api.github.com/users/courtneysprouse/orgs", "repos_url": "https://api.github.com/users/courtneysprouse/repos", "events_url": "https://api.github.com/users/courtneysprouse/events{/privacy}", "received_events_url": "https://api.github.com/users/courtneysprouse/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
"2022-07-11T17:30:24"
"2022-07-12T13:33:09"
"2022-07-12T13:33:08"
NONE
null
## Describe the bug If you use `conll_dataset.to_csv("ner_conll.csv")`, it will create a CSV file with all of your data as expected; however, when you load it with `conll_dataset = load_dataset("csv", data_files="ner_conll.csv")`, everything is read back in as a string. For example, if I look at everything in 'ner_tags' I get back `['[3 0 7 0 0 0 7 0 0]', '[1 2]', '[5 0]']` instead of what I originally saved, which was `[[3, 0, 7, 0, 0, 0, 7, 0, 0], [1, 2], [5, 0]]`. I think maybe there is something funky going on with the csv delimiter. ## Steps to reproduce the bug ```python # Sample code to reproduce the bug #load original conll dataset orig_conll = load_dataset("conll2003") #save original conll as a csv orig_conll.to_csv("ner_conll.csv") #reload conll data as a csv new_conll = load_dataset("csv", data_files="ner_conll.csv") ``` ## Expected results I would expect the data to be returned as the data type I saved it as. I.e. if I save a list of ints [[3, 0, 7, 0, 0, 0, 7, 0, 0]], I shouldn't get back a string ['[3 0 7 0 0 0 7 0 0]'] I also get back a string when I pass a list of strings ['EU', 'rejects', 'German', 'call', 'to', 'boycott', 'British', 'lamb', '.'] ## Actual results A list of strings `['[3 0 7 0 0 0 7 0 0]', '[1 2]', '[5 0]']` A string "['EU' 'rejects' 'German' 'call' 'to' 'boycott' 'British' 'lamb' '.']" ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.18.3 - Platform: Linux-5.4.0-121-generic-x86_64-with-glibc2.17 - Python version: 3.8.13 - PyArrow version: 8.0.0
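One workaround sketch (my assumption, not part of the original report): CSV is an untyped format, so list-valued columns get stringified on the way out and come back as strings regardless of the delimiter. Round-tripping through a typed format such as JSON Lines preserves the list types:

```python
# Hedged sketch: serialize through JSON Lines instead of CSV so that the
# list-valued columns survive the round trip.
from datasets import load_dataset

orig_conll = load_dataset("conll2003", split="train")
orig_conll.to_json("ner_conll.jsonl")  # JSON Lines keeps nested list types

new_conll = load_dataset("json", data_files="ner_conll.jsonl", split="train")
print(new_conll[0]["ner_tags"])  # e.g. [3, 0, 7, 0, 0, 0, 7, 0, 0]
```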
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4673/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4673/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4671
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4671/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4671/comments
https://api.github.com/repos/huggingface/datasets/issues/4671/events
https://github.com/huggingface/datasets/issues/4671
1,300,385,909
I_kwDODunzps5NglB1
4,671
Dataset Viewer issue for wmt16
{ "login": "lewtun", "id": 26859204, "node_id": "MDQ6VXNlcjI2ODU5MjA0", "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lewtun", "html_url": "https://github.com/lewtun", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "organizations_url": "https://api.github.com/users/lewtun/orgs", "repos_url": "https://api.github.com/users/lewtun/repos", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "received_events_url": "https://api.github.com/users/lewtun/received_events", "type": "User", "site_admin": false }
[ { "id": 3470211881, "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer", "name": "dataset-viewer", "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
6
"2022-07-11T08:34:11"
"2022-09-13T13:27:02"
"2022-09-08T08:16:06"
MEMBER
null
### Link https://huggingface.co/datasets/wmt16 ### Description [Reported](https://huggingface.co/spaces/autoevaluate/model-evaluator/discussions/12#62cb83f14c7f35284e796f9c) by a user of AutoTrain Evaluate. AFAIK this dataset was working 1-2 weeks ago, and I'm not sure how to interpret this error. ``` Status code: 400 Exception: NotImplementedError Message: This is a abstract method ``` Thanks! ### Owner No
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4671/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/huggingface/datasets/issues/4671/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4670
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4670/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4670/comments
https://api.github.com/repos/huggingface/datasets/issues/4670/events
https://github.com/huggingface/datasets/issues/4670
1,299,984,246
I_kwDODunzps5NfC92
4,670
Can't extract files from `.7z` zipfile using `download_and_extract`
{ "login": "bhavitvyamalik", "id": 19718818, "node_id": "MDQ6VXNlcjE5NzE4ODE4", "avatar_url": "https://avatars.githubusercontent.com/u/19718818?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bhavitvyamalik", "html_url": "https://github.com/bhavitvyamalik", "followers_url": "https://api.github.com/users/bhavitvyamalik/followers", "following_url": "https://api.github.com/users/bhavitvyamalik/following{/other_user}", "gists_url": "https://api.github.com/users/bhavitvyamalik/gists{/gist_id}", "starred_url": "https://api.github.com/users/bhavitvyamalik/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhavitvyamalik/subscriptions", "organizations_url": "https://api.github.com/users/bhavitvyamalik/orgs", "repos_url": "https://api.github.com/users/bhavitvyamalik/repos", "events_url": "https://api.github.com/users/bhavitvyamalik/events{/privacy}", "received_events_url": "https://api.github.com/users/bhavitvyamalik/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
5
"2022-07-10T18:16:49"
"2022-07-15T13:02:07"
"2022-07-15T13:02:07"
CONTRIBUTOR
null
## Describe the bug I'm adding a new dataset that is distributed as a `.7z` archive on Google Drive and contains 3 JSON files. I'm able to download the archive using `download_and_extract`, but after downloading it throws this error: ``` >>> dataset = load_dataset("./datasets/mantis/") Using custom data configuration default Downloading and preparing dataset mantis/default to /Users/bhavitvyamalik/.cache/huggingface/datasets/mantis/default/1.1.0/611affa804ec53e2055a335cc1b8b213bb5a0b5142d919967729d5ee23c6bab4... Downloading data: 100%|█████████████████████████████████████████████████████████| 77.2M/77.2M [00:23<00:00, 3.28MB/s] /Users/bhavitvyamalik/.cache/huggingface/datasets/downloads/fc3d70123c9de8407587a59aa426c37819cf2bf016795d33270e8a1d558a34e6 Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/bhavitvyamalik/Desktop/work/hf/datasets/src/datasets/load.py", line 1745, in load_dataset use_auth_token=use_auth_token, File "/Users/bhavitvyamalik/Desktop/work/hf/datasets/src/datasets/builder.py", line 595, in download_and_prepare dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/Users/bhavitvyamalik/Desktop/work/hf/datasets/src/datasets/builder.py", line 690, in _download_and_prepare ) from None OSError: Cannot find data file. Original error: [Errno 20] Not a directory: '/Users/bhavitvyamalik/.cache/huggingface/datasets/downloads/fc3d70123c9de8407587a59aa426c37819cf2bf016795d33270e8a1d558a34e6/merged_train.json' ``` just before generating the splits. I checked the `fc3d70123c9de8407587a59aa426c37819cf2bf016795d33270e8a1d558a34e6` file and it's still a `.7z` archive (identical to the downloaded Google Drive file), which means it didn't get extracted. Do I need to extract it separately and then pass the paths for the train/dev/test files in `SplitGenerator`? ## Environment info - `datasets` version: 1.18.4.dev0 - Platform: Darwin-19.6.0-x86_64-i386-64bit - Python version: 3.7.8 - PyArrow version: 5.0.0
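A workaround sketch, under the assumption that 7z simply isn't among the archive formats the download manager can extract: download without extraction and unpack by hand with `py7zr` (a third-party package, not something `datasets` ships):

```python
# Hedged sketch: extract a 7z archive manually. `archive_path` is hypothetical;
# in a loading script it would come from dl_manager.download(_URL) inside
# _split_generators, after which the extracted paths feed the SplitGenerators.
import os

import py7zr

archive_path = "/path/to/downloaded.7z"
extracted_dir = os.path.join(os.path.dirname(archive_path), "extracted")

if not os.path.isdir(extracted_dir):
    with py7zr.SevenZipFile(archive_path, mode="r") as archive:
        archive.extractall(path=extracted_dir)

print(os.listdir(extracted_dir))  # should list the three JSON files
```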
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4670/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4670/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4669
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4669/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4669/comments
https://api.github.com/repos/huggingface/datasets/issues/4669/events
https://github.com/huggingface/datasets/issues/4669
1,299,848,003
I_kwDODunzps5NehtD
4,669
loading oscar-corpus/OSCAR-2201 raises an error
{ "login": "vitalyshalumov", "id": 33824221, "node_id": "MDQ6VXNlcjMzODI0MjIx", "avatar_url": "https://avatars.githubusercontent.com/u/33824221?v=4", "gravatar_id": "", "url": "https://api.github.com/users/vitalyshalumov", "html_url": "https://github.com/vitalyshalumov", "followers_url": "https://api.github.com/users/vitalyshalumov/followers", "following_url": "https://api.github.com/users/vitalyshalumov/following{/other_user}", "gists_url": "https://api.github.com/users/vitalyshalumov/gists{/gist_id}", "starred_url": "https://api.github.com/users/vitalyshalumov/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/vitalyshalumov/subscriptions", "organizations_url": "https://api.github.com/users/vitalyshalumov/orgs", "repos_url": "https://api.github.com/users/vitalyshalumov/repos", "events_url": "https://api.github.com/users/vitalyshalumov/events{/privacy}", "received_events_url": "https://api.github.com/users/vitalyshalumov/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
1
"2022-07-10T07:09:30"
"2022-07-11T09:27:49"
"2022-07-11T09:27:49"
NONE
null
## Describe the bug load_dataset('oscar-2201', 'af') raises an error: Traceback (most recent call last): File "/usr/lib/python3.8/code.py", line 90, in runcode exec(code, self.locals) File "<input>", line 1, in <module> File "..python3.8/site-packages/datasets/load.py", line 1656, in load_dataset builder_instance = load_dataset_builder( File ".../lib/python3.8/site-packages/datasets/load.py", line 1439, in load_dataset_builder dataset_module = dataset_module_factory( File ".../lib/python3.8/site-packages/datasets/load.py", line 1189, in dataset_module_factory raise FileNotFoundError( FileNotFoundError: Couldn't find a dataset script at .../oscar-2201/oscar-2201.py or any data file in the same directory. Couldn't find 'oscar-2201' on the Hugging Face Hub either: FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/huggingface/datasets/master/datasets/oscar-2201/oscar-2201.py I've tried other permutations such as: oscar_22 = load_dataset('oscar-2201', 'af', use_auth_token=True) oscar_22 = load_dataset('oscar-corpus/OSCAR-2201', 'af', use_auth_token=True) oscar_22 = load_dataset('oscar-2201', 'af') oscar_22 = load_dataset('oscar-corpus/OSCAR-2201') with the same unfortunate result. ## Steps to reproduce the bug ```python oscar_22 = load_dataset('oscar-2201', 'af', use_auth_token=True) oscar_22 = load_dataset('oscar-corpus/OSCAR-2201', 'af', use_auth_token=True) oscar_22 = load_dataset('oscar-2201', 'af') oscar_22 = load_dataset('oscar-corpus/OSCAR-2201') ``` ## Expected results The dataset loads successfully. ## Actual results Traceback (most recent call last): File "/usr/lib/python3.8/code.py", line 90, in runcode exec(code, self.locals) File "<input>", line 1, in <module> File "..python3.8/site-packages/datasets/load.py", line 1656, in load_dataset builder_instance = load_dataset_builder( File ".../lib/python3.8/site-packages/datasets/load.py", line 1439, in load_dataset_builder dataset_module = dataset_module_factory( File ".../lib/python3.8/site-packages/datasets/load.py", line 1189, in dataset_module_factory raise FileNotFoundError( FileNotFoundError: Couldn't find a dataset script at .../oscar-2201/oscar-2201.py or any data file in the same directory. Couldn't find 'oscar-2201' on the Hugging Face Hub either: FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/huggingface/datasets/master/datasets/oscar-2201/oscar-2201.py ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.3.2 - Platform: Linux-5.13.0-37-generic-x86_64-with-glibc2.29 - Python version: 3.8.10 - PyArrow version: 8.0.0 - Pandas version: 1.4.3
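For reference, a hedged sketch of how gated Hub datasets are normally loaded; this is an assumption about the cause rather than a verified fix, since the report says the full repo id was also tried. OSCAR-2201 lives under the `oscar-corpus` organization and requires accepting its terms on the Hub page plus an authenticated token, so the short name `oscar-2201` alone cannot resolve:

```python
# Hedged sketch: load a gated community dataset by its full repo id.
# Assumptions: access granted on the dataset page at
# https://huggingface.co/datasets/oscar-corpus/OSCAR-2201 and
# `huggingface-cli login` run beforehand.
from datasets import load_dataset

oscar_af = load_dataset("oscar-corpus/OSCAR-2201", "af", use_auth_token=True)
print(oscar_af)
```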
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4669/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4669/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4668
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4668/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4668/comments
https://api.github.com/repos/huggingface/datasets/issues/4668/events
https://github.com/huggingface/datasets/issues/4668
1,299,735,893
I_kwDODunzps5NeGVV
4,668
Dataset Viewer issue for hungnm/multilingual-amazon-review-sentiment-processed
{ "login": "hungnmai", "id": 21364546, "node_id": "MDQ6VXNlcjIxMzY0NTQ2", "avatar_url": "https://avatars.githubusercontent.com/u/21364546?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hungnmai", "html_url": "https://github.com/hungnmai", "followers_url": "https://api.github.com/users/hungnmai/followers", "following_url": "https://api.github.com/users/hungnmai/following{/other_user}", "gists_url": "https://api.github.com/users/hungnmai/gists{/gist_id}", "starred_url": "https://api.github.com/users/hungnmai/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hungnmai/subscriptions", "organizations_url": "https://api.github.com/users/hungnmai/orgs", "repos_url": "https://api.github.com/users/hungnmai/repos", "events_url": "https://api.github.com/users/hungnmai/events{/privacy}", "received_events_url": "https://api.github.com/users/hungnmai/received_events", "type": "User", "site_admin": false }
[ { "id": 3470211881, "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer", "name": "dataset-viewer", "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co" } ]
closed
false
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[ { "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false } ]
null
1
"2022-07-09T18:04:13"
"2022-07-11T07:47:47"
"2022-07-11T07:47:47"
NONE
null
### Link https://huggingface.co/hungnm/multilingual-amazon-review-sentiment ### Description _No response_ ### Owner Yes
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4668/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4668/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4667
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4667/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4667/comments
https://api.github.com/repos/huggingface/datasets/issues/4667/events
https://github.com/huggingface/datasets/issues/4667
1,299,735,703
I_kwDODunzps5NeGSX
4,667
Dataset Viewer issue for hungnm/multilingual-amazon-review-sentiment-processed
{ "login": "hungnmai", "id": 21364546, "node_id": "MDQ6VXNlcjIxMzY0NTQ2", "avatar_url": "https://avatars.githubusercontent.com/u/21364546?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hungnmai", "html_url": "https://github.com/hungnmai", "followers_url": "https://api.github.com/users/hungnmai/followers", "following_url": "https://api.github.com/users/hungnmai/following{/other_user}", "gists_url": "https://api.github.com/users/hungnmai/gists{/gist_id}", "starred_url": "https://api.github.com/users/hungnmai/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hungnmai/subscriptions", "organizations_url": "https://api.github.com/users/hungnmai/orgs", "repos_url": "https://api.github.com/users/hungnmai/repos", "events_url": "https://api.github.com/users/hungnmai/events{/privacy}", "received_events_url": "https://api.github.com/users/hungnmai/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892865, "node_id": "MDU6TGFiZWwxOTM1ODkyODY1", "url": "https://api.github.com/repos/huggingface/datasets/labels/duplicate", "name": "duplicate", "color": "cfd3d7", "default": true, "description": "This issue or pull request already exists" } ]
closed
false
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[ { "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false } ]
null
0
"2022-07-09T18:03:15"
"2022-07-11T07:47:15"
"2022-07-11T07:47:15"
NONE
null
### Link _No response_ ### Description _No response_ ### Owner _No response_
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4667/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4667/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4666
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4666/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4666/comments
https://api.github.com/repos/huggingface/datasets/issues/4666/events
https://github.com/huggingface/datasets/issues/4666
1,299,732,238
I_kwDODunzps5NeFcO
4,666
Issues with concatenating datasets
{ "login": "ChenghaoMou", "id": 32014649, "node_id": "MDQ6VXNlcjMyMDE0NjQ5", "avatar_url": "https://avatars.githubusercontent.com/u/32014649?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ChenghaoMou", "html_url": "https://github.com/ChenghaoMou", "followers_url": "https://api.github.com/users/ChenghaoMou/followers", "following_url": "https://api.github.com/users/ChenghaoMou/following{/other_user}", "gists_url": "https://api.github.com/users/ChenghaoMou/gists{/gist_id}", "starred_url": "https://api.github.com/users/ChenghaoMou/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ChenghaoMou/subscriptions", "organizations_url": "https://api.github.com/users/ChenghaoMou/orgs", "repos_url": "https://api.github.com/users/ChenghaoMou/repos", "events_url": "https://api.github.com/users/ChenghaoMou/events{/privacy}", "received_events_url": "https://api.github.com/users/ChenghaoMou/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
"2022-07-09T17:45:14"
"2022-07-12T17:16:15"
"2022-07-12T17:16:14"
NONE
null
## Describe the bug It is impossible to concatenate datasets if a feature is a sequence of dicts in one dataset and a dict of sequences in another. But based on the documentation, it should be converted automatically. > A [datasets.Sequence](https://huggingface.co/docs/datasets/v2.3.2/en/package_reference/main_classes#datasets.Sequence) with an internal dictionary feature will be automatically converted into a dictionary of lists. This behavior is implemented to have a compatibility layer with the TensorFlow Datasets library but may be unwanted in some cases. If you don’t want this behavior, you can use a Python list instead of the [datasets.Sequence](https://huggingface.co/docs/datasets/v2.3.2/en/package_reference/main_classes#datasets.Sequence). ## Steps to reproduce the bug ```python from datasets import concatenate_datasets, load_dataset squad = load_dataset("squad_v2") squad["train"].to_json("output.jsonl", lines=True) temp = load_dataset("json", data_files={"train": "output.jsonl"}) concatenate_datasets([temp["train"], squad["train"]]) ``` ## Expected results No error executing that code ## Actual results ``` ValueError: The features can't be aligned because the key answers of features {'id': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None), 'context': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None)} has unexpected type - Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None) (expected either {'text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'answer_start': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None)} or Value("null"). ``` ## Environment info - `datasets` version: 2.3.2 - Platform: macOS-12.4-arm64-arm-64bit - Python version: 3.8.11 - PyArrow version: 6.0.1 - Pandas version: 1.3.5
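A workaround sketch (my assumption, not from the report): explicitly cast the re-loaded dataset back to the original features before concatenating. `Sequence`-of-dict and dict-of-`Sequence` share the same underlying Arrow layout, so the cast should only need to adjust the declared feature types:

```python
# Hedged sketch: align the JSON round-trip's features with the originals,
# then concatenate.
from datasets import concatenate_datasets, load_dataset

squad = load_dataset("squad_v2", split="train")
squad.to_json("output.jsonl", lines=True)

temp = load_dataset("json", data_files={"train": "output.jsonl"}, split="train")
temp = temp.cast(squad.features)  # dict-of-Sequence -> Sequence-of-dict

combined = concatenate_datasets([temp, squad])
print(combined)
```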
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4666/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4666/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4665
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4665/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4665/comments
https://api.github.com/repos/huggingface/datasets/issues/4665/events
https://github.com/huggingface/datasets/issues/4665
1,299,652,638
I_kwDODunzps5NdyAe
4,665
Unable to create dataset having Python dataset script only
{ "login": "aleSuglia", "id": 1479733, "node_id": "MDQ6VXNlcjE0Nzk3MzM=", "avatar_url": "https://avatars.githubusercontent.com/u/1479733?v=4", "gravatar_id": "", "url": "https://api.github.com/users/aleSuglia", "html_url": "https://github.com/aleSuglia", "followers_url": "https://api.github.com/users/aleSuglia/followers", "following_url": "https://api.github.com/users/aleSuglia/following{/other_user}", "gists_url": "https://api.github.com/users/aleSuglia/gists{/gist_id}", "starred_url": "https://api.github.com/users/aleSuglia/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/aleSuglia/subscriptions", "organizations_url": "https://api.github.com/users/aleSuglia/orgs", "repos_url": "https://api.github.com/users/aleSuglia/repos", "events_url": "https://api.github.com/users/aleSuglia/events{/privacy}", "received_events_url": "https://api.github.com/users/aleSuglia/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
1
"2022-07-09T11:45:46"
"2022-07-11T07:10:09"
"2022-07-11T07:10:01"
CONTRIBUTOR
null
## Describe the bug Hi there, I'm trying to add the following dataset to Huggingface datasets: https://huggingface.co/datasets/Heriot-WattUniversity/dialog-babi/blob/ I'm trying to do so using the CLI commands, but it seems that this command generates the wrong `dataset_info.json` file (you can find it in the repo already): ``` datasets-cli test Heriot-WattUniversity/dialog-babi/dialog_babi.py --save_infos --all-configs ``` whereas it errors when I remove the Python script: ``` datasets-cli test Heriot-WattUniversity/dialog-babi/ --save_infos --all-configs ``` The error message is the following: ``` FileNotFoundError: Unable to resolve any data file that matches '['**']' at /Users/as2180/workspace/Heriot-WattUniversity/dialog-babi with any supported extension ['csv', 'tsv', 'json', 'jsonl', 'parquet', 'txt', 'blp', 'bmp', 'dib', 'bufr', 'cur', 'pcx', 'dcx', 'dds', 'ps', 'eps', 'fit', 'fits', 'fli', 'flc', 'ftc', 'ftu', 'gbr', 'gif', 'grib', 'h5', 'hdf', 'png', 'apng', 'jp2', 'j2k', 'jpc', 'jpf', 'jpx', 'j2c', 'icns', 'ico', 'im', 'iim', 'tif', 'tiff', 'jfif', 'jpe', 'jpg', 'jpeg', 'mpg', 'mpeg', 'msp', 'pcd', 'pxr', 'pbm', 'pgm', 'ppm', 'pnm', 'psd', 'bw', 'rgb', 'rgba', 'sgi', 'ras', 'tga', 'icb', 'vda', 'vst', 'webp', 'wmf', 'emf', 'xbm', 'xpm', 'zip'] ``` ## Environment info - `datasets` version: 2.3.2 - Platform: macOS-12.4-arm64-arm-64bit - Python version: 3.9.9 - PyArrow version: 8.0.0 - Pandas version: 1.4.3
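A hedged aside on the second failure: without a loading script, `datasets` falls back to resolving raw data files by extension, which is why the error enumerates the supported extensions it searched for. A sketch of that fallback path, assuming the directory held plain `.txt` files (the local path is taken from the error message above):

```python
# Hedged sketch: the no-script path loads raw data files via a packaged
# builder such as "text", selected by file extension.
from datasets import load_dataset

ds = load_dataset(
    "text",
    data_dir="/Users/as2180/workspace/Heriot-WattUniversity/dialog-babi",
)
print(ds)
```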
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4665/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4665/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4661
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4661/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4661/comments
https://api.github.com/repos/huggingface/datasets/issues/4661/events
https://github.com/huggingface/datasets/issues/4661
1,298,374,944
I_kwDODunzps5NY6Eg
4,661
Concurrency bug when using same cache among several jobs
{ "login": "ioana-blue", "id": 17202292, "node_id": "MDQ6VXNlcjE3MjAyMjky", "avatar_url": "https://avatars.githubusercontent.com/u/17202292?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ioana-blue", "html_url": "https://github.com/ioana-blue", "followers_url": "https://api.github.com/users/ioana-blue/followers", "following_url": "https://api.github.com/users/ioana-blue/following{/other_user}", "gists_url": "https://api.github.com/users/ioana-blue/gists{/gist_id}", "starred_url": "https://api.github.com/users/ioana-blue/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ioana-blue/subscriptions", "organizations_url": "https://api.github.com/users/ioana-blue/orgs", "repos_url": "https://api.github.com/users/ioana-blue/repos", "events_url": "https://api.github.com/users/ioana-blue/events{/privacy}", "received_events_url": "https://api.github.com/users/ioana-blue/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
2
"2022-07-08T01:58:11"
"2022-07-15T17:11:23"
null
NONE
null
## Describe the bug I used to see this bug with an older version of `datasets`. It seems to persist. This is my concrete scenario: I launch several evaluation jobs on a cluster in which I share the file system and I share the cache directory used by huggingface libraries. The evaluation jobs read the same *.csv files. If my jobs all get scheduled pretty much at the same time, there are all kinds of weird concurrency errors. Sometimes it crashes silently. This time I got lucky: it crashed with a stack trace that I can share, so maybe you can get to the bottom of this. If you don't have a similar setup available, it may be hard to reproduce as you really need two jobs accessing the same file at the same time to see this type of bug. ## Steps to reproduce the bug I'm running a modified version of the `run_glue.py` script adapted to my use case. I've seen the same problem when running some glue datasets as well (so it's not specific to loading the datasets from csv files). ## Expected results No crash, concurrent access to the (intermediate) files just fine. ## Actual results Crashes due to races/concurrency bugs. ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.3.2 - Platform: Linux-4.18.0-348.23.1.el8_5.x86_64-x86_64-with-glibc2.10 - Python version: 3.8.5 - PyArrow version: 8.0.0 - Pandas version: 1.1.0 Stack trace that I just got with the crash (I've obfuscated some names, it should still be quite informative): ``` Running tokenizer on dataset: 0%| | 0/3 [00:00<?, ?ba/s] Traceback (most recent call last): File "../../src/models//run_*******.py", line 600, in <module> main() File "../../src/models//run_*******.py", line 444, in main raw_datasets = raw_datasets.map( File "/*******//envs/tr-crt/lib/python3.8/site-packages/datasets/dataset_dict.py", line 770, in map { File "/*******//envs/tr-crt/lib/python3.8/site-packages/datasets/dataset_dict.py", line 771, in <dictcomp> k: dataset.map( File "/*******//envs/tr-crt/lib/python3.8/site-packages/datasets/arrow_dataset.py", line 2376, in map return self._map_single( File "/*******/envs/tr-crt/lib/python3.8/site-packages/datasets/arrow_dataset.py", line 551, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) File "/*******//envs/tr-crt/lib/python3.8/site-packages/datasets/arrow_dataset.py", line 518, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) File "/*******/envs/tr-crt/lib/python3.8/site-packages/datasets/fingerprint.py", line 458, in wrapper out = func(self, *args, **kwargs) File "/*******//envs/tr-crt/lib/python3.8/site-packages/datasets/arrow_dataset.py", line 2776, in _map_single buf_writer, writer, tmp_file = init_buffer_and_writer() File "/*******//envs/tr-crt/lib/python3.8/site-packages/datasets/arrow_dataset.py", line 2696, in init_buffer_and_writer tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(cache_file_name), delete=False) File "/*******//envs/tr-crt/lib/python3.8/tempfile.py", line 541, in NamedTemporaryFile (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type) File "/*******//envs/tr-crt/lib/python3.8/tempfile.py", line 250, in _mkstemp_inner fd = _os.open(file, flags, 0o600) FileNotFoundError: [Errno 2] No such file or directory: '/*******/cache-transformers//transformers/csv/default-ef9cd184210742a7/0.0.0/51cce309a08df9c4d82ffd9363bbe090bf173197fc01a71b034e8594995a1a58/tmps8l6j5yc' ``` As I ran 100s of experiments last year for an empirical paper, I ran into
this type of bug several times. I found several band-aid workarounds, e.g., run one job first that caches the dataset => eliminates concurrency; or use unique caches => eliminates concurrency (but increases storage space), etc., and it all works fine. I'd like to help you fix this bug, as it's really annoying to always apply the workarounds. Let me know what other info from my side could help you figure out the issue. Thanks for your help!
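Until the race is fixed upstream, here is the "run one job first" workaround expressed as an explicit lock; a sketch only, assuming a shared filesystem and the `filelock` package (which `datasets` itself depends on), with hypothetical paths:

```python
# Hedged sketch: serialize the cache-populating preprocessing across jobs so a
# single process writes the shared cache files and the rest just reuse them.
import os

from datasets import load_dataset
from filelock import FileLock

CACHE_DIR = "/shared/cache-transformers"  # hypothetical shared cache path

with FileLock(os.path.join(CACHE_DIR, "preprocess.lock")):
    raw = load_dataset("csv", data_files="data.csv", cache_dir=CACHE_DIR)
    # the first job to take the lock materializes the map() cache on disk;
    # later jobs find it there and load it instead of rewriting it
    processed = raw.map(lambda example: example, load_from_cache_file=True)
```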
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4661/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/huggingface/datasets/issues/4661/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4658
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4658/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4658/comments
https://api.github.com/repos/huggingface/datasets/issues/4658/events
https://github.com/huggingface/datasets/issues/4658
1,297,001,390
I_kwDODunzps5NTquu
4,658
Transfer CI tests to GitHub Actions
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
0
"2022-07-07T08:10:50"
"2022-07-12T11:18:25"
"2022-07-12T11:18:25"
MEMBER
null
Let's try CI tests using GitHub Actions to see if they are more stable than on CircleCI.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4658/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4658/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4657
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4657/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4657/comments
https://api.github.com/repos/huggingface/datasets/issues/4657/events
https://github.com/huggingface/datasets/issues/4657
1,296,743,133
I_kwDODunzps5NSrrd
4,657
Add SQuAD2.0 Dataset
{ "login": "omarespejel", "id": 4755430, "node_id": "MDQ6VXNlcjQ3NTU0MzA=", "avatar_url": "https://avatars.githubusercontent.com/u/4755430?v=4", "gravatar_id": "", "url": "https://api.github.com/users/omarespejel", "html_url": "https://github.com/omarespejel", "followers_url": "https://api.github.com/users/omarespejel/followers", "following_url": "https://api.github.com/users/omarespejel/following{/other_user}", "gists_url": "https://api.github.com/users/omarespejel/gists{/gist_id}", "starred_url": "https://api.github.com/users/omarespejel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/omarespejel/subscriptions", "organizations_url": "https://api.github.com/users/omarespejel/orgs", "repos_url": "https://api.github.com/users/omarespejel/repos", "events_url": "https://api.github.com/users/omarespejel/events{/privacy}", "received_events_url": "https://api.github.com/users/omarespejel/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" } ]
closed
false
null
[]
null
2
"2022-07-07T03:19:36"
"2022-07-12T16:14:52"
"2022-07-12T16:14:52"
NONE
null
## Adding a Dataset - **Name:** *SQuAD2.0* - **Description:** *Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable.* - **Paper:** *https://aclanthology.org/P18-2124.pdf* - **Data:** *https://rajpurkar.github.io/SQuAD-explorer/* - **Motivation:** *Dataset for training and evaluating models of conversational response*
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4657/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4657/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4656
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4656/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4656/comments
https://api.github.com/repos/huggingface/datasets/issues/4656/events
https://github.com/huggingface/datasets/issues/4656
1,296,740,266
I_kwDODunzps5NSq-q
4,656
Add Amazon-QA Dataset
{ "login": "omarespejel", "id": 4755430, "node_id": "MDQ6VXNlcjQ3NTU0MzA=", "avatar_url": "https://avatars.githubusercontent.com/u/4755430?v=4", "gravatar_id": "", "url": "https://api.github.com/users/omarespejel", "html_url": "https://github.com/omarespejel", "followers_url": "https://api.github.com/users/omarespejel/followers", "following_url": "https://api.github.com/users/omarespejel/following{/other_user}", "gists_url": "https://api.github.com/users/omarespejel/gists{/gist_id}", "starred_url": "https://api.github.com/users/omarespejel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/omarespejel/subscriptions", "organizations_url": "https://api.github.com/users/omarespejel/orgs", "repos_url": "https://api.github.com/users/omarespejel/repos", "events_url": "https://api.github.com/users/omarespejel/events{/privacy}", "received_events_url": "https://api.github.com/users/omarespejel/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" } ]
closed
false
null
[]
null
1
"2022-07-07T03:15:11"
"2022-07-14T02:20:12"
"2022-07-14T02:20:12"
NONE
null
## Adding a Dataset - **Name:** *Amazon-QA* - **Description:** *The dataset is .jsonl format, where each line in the file is a json string that corresponds to a question, existing answers to the question and the extracted review snippets (relevant to the question).* - **Paper:** *https://github.com/amazonqa/amazonqa/tree/master/paper* - **Data:** *https://huggingface.co/datasets/sentence-transformers/embedding-training-data/resolve/main/amazon-qa.jsonl.gz* - **Motivation:** *Dataset for training and evaluating models of conversational response*
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4656/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4656/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4655
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4655/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4655/comments
https://api.github.com/repos/huggingface/datasets/issues/4655/events
https://github.com/huggingface/datasets/issues/4655
1,296,720,896
I_kwDODunzps5NSmQA
4,655
Simple Wikipedia
{ "login": "omarespejel", "id": 4755430, "node_id": "MDQ6VXNlcjQ3NTU0MzA=", "avatar_url": "https://avatars.githubusercontent.com/u/4755430?v=4", "gravatar_id": "", "url": "https://api.github.com/users/omarespejel", "html_url": "https://github.com/omarespejel", "followers_url": "https://api.github.com/users/omarespejel/followers", "following_url": "https://api.github.com/users/omarespejel/following{/other_user}", "gists_url": "https://api.github.com/users/omarespejel/gists{/gist_id}", "starred_url": "https://api.github.com/users/omarespejel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/omarespejel/subscriptions", "organizations_url": "https://api.github.com/users/omarespejel/orgs", "repos_url": "https://api.github.com/users/omarespejel/repos", "events_url": "https://api.github.com/users/omarespejel/events{/privacy}", "received_events_url": "https://api.github.com/users/omarespejel/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" } ]
closed
false
null
[]
null
1
"2022-07-07T02:51:26"
"2022-07-14T02:16:33"
"2022-07-14T02:16:33"
NONE
null
## Adding a Dataset - **Name:** *Simple Wikipedia* - **Description:** *Two different versions of the data set now exist. Both were generated by aligning Simple English Wikipedia and English Wikipedia. A complete description of the extraction process can be found in "Simple English Wikipedia: A New Simplification Task", William Coster and David Kauchak (2011).* - **Paper:** *https://aclanthology.org/P11-2117/* - **Data:** *https://huggingface.co/datasets/sentence-transformers/embedding-training-data/resolve/main/SimpleWiki.jsonl.gz* - **Motivation:** *Dataset for training and evaluating models of conversational response*
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4655/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4655/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4654
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4654/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4654/comments
https://api.github.com/repos/huggingface/datasets/issues/4654/events
https://github.com/huggingface/datasets/issues/4654
1,296,716,119
I_kwDODunzps5NSlFX
4,654
Add Quora Question Triplets Dataset
{ "login": "omarespejel", "id": 4755430, "node_id": "MDQ6VXNlcjQ3NTU0MzA=", "avatar_url": "https://avatars.githubusercontent.com/u/4755430?v=4", "gravatar_id": "", "url": "https://api.github.com/users/omarespejel", "html_url": "https://github.com/omarespejel", "followers_url": "https://api.github.com/users/omarespejel/followers", "following_url": "https://api.github.com/users/omarespejel/following{/other_user}", "gists_url": "https://api.github.com/users/omarespejel/gists{/gist_id}", "starred_url": "https://api.github.com/users/omarespejel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/omarespejel/subscriptions", "organizations_url": "https://api.github.com/users/omarespejel/orgs", "repos_url": "https://api.github.com/users/omarespejel/repos", "events_url": "https://api.github.com/users/omarespejel/events{/privacy}", "received_events_url": "https://api.github.com/users/omarespejel/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" } ]
closed
false
null
[]
null
1
"2022-07-07T02:43:42"
"2022-07-14T02:13:50"
"2022-07-14T02:13:50"
NONE
null
## Adding a Dataset - **Name:** *Quora Question Triplets* - **Description:** *This dataset consists of over 400,000 lines of potential question duplicate pairs. Each line contains IDs for each question in the pair, the full text for each question, and a binary value that indicates whether the line truly contains a duplicate pair.* - **Paper:** - **Data:** *https://huggingface.co/datasets/sentence-transformers/embedding-training-data/resolve/main/quora_duplicates_triplets.jsonl.gz* - **Motivation:** *Dataset for training and evaluating models of conversational response*
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4654/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4654/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4653
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4653/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4653/comments
https://api.github.com/repos/huggingface/datasets/issues/4653/events
https://github.com/huggingface/datasets/issues/4653
1,296,702,834
I_kwDODunzps5NSh1y
4,653
Add Altlex dataset
{ "login": "omarespejel", "id": 4755430, "node_id": "MDQ6VXNlcjQ3NTU0MzA=", "avatar_url": "https://avatars.githubusercontent.com/u/4755430?v=4", "gravatar_id": "", "url": "https://api.github.com/users/omarespejel", "html_url": "https://github.com/omarespejel", "followers_url": "https://api.github.com/users/omarespejel/followers", "following_url": "https://api.github.com/users/omarespejel/following{/other_user}", "gists_url": "https://api.github.com/users/omarespejel/gists{/gist_id}", "starred_url": "https://api.github.com/users/omarespejel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/omarespejel/subscriptions", "organizations_url": "https://api.github.com/users/omarespejel/orgs", "repos_url": "https://api.github.com/users/omarespejel/repos", "events_url": "https://api.github.com/users/omarespejel/events{/privacy}", "received_events_url": "https://api.github.com/users/omarespejel/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" } ]
closed
false
null
[]
null
1
"2022-07-07T02:23:02"
"2022-07-14T02:12:39"
"2022-07-14T02:12:39"
NONE
null
## Adding a Dataset - **Name:** *Altlex* - **Description:** *Git repository for software associated with the 2016 ACL paper "Identifying Causal Relations Using Parallel Wikipedia Articles."* - **Paper:** *https://aclanthology.org/P16-1135.pdf* - **Data:** *https://huggingface.co/datasets/sentence-transformers/embedding-training-data/resolve/main/altlex.jsonl.gz* - **Motivation:** *Dataset for training and evaluating models of conversational response*
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4653/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4653/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4652
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4652/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4652/comments
https://api.github.com/repos/huggingface/datasets/issues/4652/events
https://github.com/huggingface/datasets/issues/4652
1,296,697,498
I_kwDODunzps5NSgia
4,652
Add Sentence Compression Dataset
{ "login": "omarespejel", "id": 4755430, "node_id": "MDQ6VXNlcjQ3NTU0MzA=", "avatar_url": "https://avatars.githubusercontent.com/u/4755430?v=4", "gravatar_id": "", "url": "https://api.github.com/users/omarespejel", "html_url": "https://github.com/omarespejel", "followers_url": "https://api.github.com/users/omarespejel/followers", "following_url": "https://api.github.com/users/omarespejel/following{/other_user}", "gists_url": "https://api.github.com/users/omarespejel/gists{/gist_id}", "starred_url": "https://api.github.com/users/omarespejel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/omarespejel/subscriptions", "organizations_url": "https://api.github.com/users/omarespejel/orgs", "repos_url": "https://api.github.com/users/omarespejel/repos", "events_url": "https://api.github.com/users/omarespejel/events{/privacy}", "received_events_url": "https://api.github.com/users/omarespejel/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" } ]
closed
false
null
[]
null
1
"2022-07-07T02:13:46"
"2022-07-14T02:11:48"
"2022-07-14T02:11:48"
NONE
null
## Adding a Dataset - **Name:** *Sentence Compression* - **Description:** *Large corpus of uncompressed and compressed sentences from news articles.* - **Paper:** *https://www.aclweb.org/anthology/D13-1155/* - **Data:** *https://github.com/google-research-datasets/sentence-compression/tree/master/data* - **Motivation:** *Dataset for training and evaluating models of conversational response*
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4652/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4652/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4651
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4651/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4651/comments
https://api.github.com/repos/huggingface/datasets/issues/4651/events
https://github.com/huggingface/datasets/issues/4651
1,296,689,414
I_kwDODunzps5NSekG
4,651
Add Flickr 30k Dataset
{ "login": "omarespejel", "id": 4755430, "node_id": "MDQ6VXNlcjQ3NTU0MzA=", "avatar_url": "https://avatars.githubusercontent.com/u/4755430?v=4", "gravatar_id": "", "url": "https://api.github.com/users/omarespejel", "html_url": "https://github.com/omarespejel", "followers_url": "https://api.github.com/users/omarespejel/followers", "following_url": "https://api.github.com/users/omarespejel/following{/other_user}", "gists_url": "https://api.github.com/users/omarespejel/gists{/gist_id}", "starred_url": "https://api.github.com/users/omarespejel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/omarespejel/subscriptions", "organizations_url": "https://api.github.com/users/omarespejel/orgs", "repos_url": "https://api.github.com/users/omarespejel/repos", "events_url": "https://api.github.com/users/omarespejel/events{/privacy}", "received_events_url": "https://api.github.com/users/omarespejel/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" } ]
closed
false
null
[]
null
1
"2022-07-07T01:59:08"
"2022-07-14T02:09:45"
"2022-07-14T02:09:45"
NONE
null
## Adding a Dataset - **Name:** *Flickr 30k* - **Description:** *To produce the denotation graph, we have created an image caption corpus consisting of 158,915 crowd-sourced captions describing 31,783 images. This is an extension of our previous Flickr 8k Dataset. The new images and captions focus on people involved in everyday activities and events.* - **Paper:** *https://transacl.org/ojs/index.php/tacl/article/view/229/33* - **Data:** *https://huggingface.co/datasets/sentence-transformers/embedding-training-data/resolve/main/flickr30k_captions.jsonl.gz* - **Motivation:** *Dataset for training and evaluating models of conversational response*
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4651/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4651/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4650
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4650/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4650/comments
https://api.github.com/repos/huggingface/datasets/issues/4650/events
https://github.com/huggingface/datasets/issues/4650
1,296,680,037
I_kwDODunzps5NScRl
4,650
Add SPECTER dataset
{ "login": "omarespejel", "id": 4755430, "node_id": "MDQ6VXNlcjQ3NTU0MzA=", "avatar_url": "https://avatars.githubusercontent.com/u/4755430?v=4", "gravatar_id": "", "url": "https://api.github.com/users/omarespejel", "html_url": "https://github.com/omarespejel", "followers_url": "https://api.github.com/users/omarespejel/followers", "following_url": "https://api.github.com/users/omarespejel/following{/other_user}", "gists_url": "https://api.github.com/users/omarespejel/gists{/gist_id}", "starred_url": "https://api.github.com/users/omarespejel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/omarespejel/subscriptions", "organizations_url": "https://api.github.com/users/omarespejel/orgs", "repos_url": "https://api.github.com/users/omarespejel/repos", "events_url": "https://api.github.com/users/omarespejel/events{/privacy}", "received_events_url": "https://api.github.com/users/omarespejel/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" } ]
open
false
null
[]
null
1
"2022-07-07T01:41:32"
"2022-07-14T02:07:49"
null
NONE
null
## Adding a Dataset - **Name:** *SPECTER* - **Description:** *SPECTER: Document-level Representation Learning using Citation-informed Transformers* - **Paper:** *https://doi.org/10.18653/v1/2020.acl-main.207* - **Data:** *https://huggingface.co/datasets/sentence-transformers/embedding-training-data/resolve/main/specter_train_triples.jsonl.gz* - **Motivation:** *Dataset for training and evaluating models of conversational response*
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4650/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4650/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4649
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4649/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4649/comments
https://api.github.com/repos/huggingface/datasets/issues/4649/events
https://github.com/huggingface/datasets/issues/4649
1,296,673,712
I_kwDODunzps5NSauw
4,649
Add PAQ dataset
{ "login": "omarespejel", "id": 4755430, "node_id": "MDQ6VXNlcjQ3NTU0MzA=", "avatar_url": "https://avatars.githubusercontent.com/u/4755430?v=4", "gravatar_id": "", "url": "https://api.github.com/users/omarespejel", "html_url": "https://github.com/omarespejel", "followers_url": "https://api.github.com/users/omarespejel/followers", "following_url": "https://api.github.com/users/omarespejel/following{/other_user}", "gists_url": "https://api.github.com/users/omarespejel/gists{/gist_id}", "starred_url": "https://api.github.com/users/omarespejel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/omarespejel/subscriptions", "organizations_url": "https://api.github.com/users/omarespejel/orgs", "repos_url": "https://api.github.com/users/omarespejel/repos", "events_url": "https://api.github.com/users/omarespejel/events{/privacy}", "received_events_url": "https://api.github.com/users/omarespejel/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" } ]
closed
false
null
[]
null
1
"2022-07-07T01:29:42"
"2022-07-14T02:06:27"
"2022-07-14T02:06:27"
NONE
null
## Adding a Dataset - **Name:** *PAQ* - **Description:** *This repository contains code and models to support the research paper PAQ: 65 Million Probably-Asked Questions and What You Can Do With Them* - **Paper:** *https://arxiv.org/abs/2102.07033* - **Data:** *https://huggingface.co/datasets/sentence-transformers/embedding-training-data/resolve/main/PAQ_pairs.jsonl.gz* - **Motivation:** *Dataset for training and evaluating models of conversational response*
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4649/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4649/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4648
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4648/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4648/comments
https://api.github.com/repos/huggingface/datasets/issues/4648/events
https://github.com/huggingface/datasets/issues/4648
1,296,659,335
I_kwDODunzps5NSXOH
4,648
Add WikiAnswers dataset
{ "login": "omarespejel", "id": 4755430, "node_id": "MDQ6VXNlcjQ3NTU0MzA=", "avatar_url": "https://avatars.githubusercontent.com/u/4755430?v=4", "gravatar_id": "", "url": "https://api.github.com/users/omarespejel", "html_url": "https://github.com/omarespejel", "followers_url": "https://api.github.com/users/omarespejel/followers", "following_url": "https://api.github.com/users/omarespejel/following{/other_user}", "gists_url": "https://api.github.com/users/omarespejel/gists{/gist_id}", "starred_url": "https://api.github.com/users/omarespejel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/omarespejel/subscriptions", "organizations_url": "https://api.github.com/users/omarespejel/orgs", "repos_url": "https://api.github.com/users/omarespejel/repos", "events_url": "https://api.github.com/users/omarespejel/events{/privacy}", "received_events_url": "https://api.github.com/users/omarespejel/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" } ]
closed
false
null
[]
null
1
"2022-07-07T01:06:37"
"2022-07-14T02:03:40"
"2022-07-14T02:03:40"
NONE
null
## Adding a Dataset - **Name:** *WikiAnswers* - **Description:** *The WikiAnswers corpus contains clusters of questions tagged by WikiAnswers users as paraphrases. Each cluster optionally contains an answer provided by WikiAnswers users.* - **Paper:** *https://dl.acm.org/doi/10.1145/2623330.2623677* - **Data:** *https://github.com/afader/oqa#wikianswers-corpus* - **Motivation:** *Dataset for training and evaluating models of conversational response*
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4648/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4648/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4647
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4647/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4647/comments
https://api.github.com/repos/huggingface/datasets/issues/4647/events
https://github.com/huggingface/datasets/issues/4647
1,296,311,270
I_kwDODunzps5NRCPm
4,647
Add Reddit dataset
{ "login": "omarespejel", "id": 4755430, "node_id": "MDQ6VXNlcjQ3NTU0MzA=", "avatar_url": "https://avatars.githubusercontent.com/u/4755430?v=4", "gravatar_id": "", "url": "https://api.github.com/users/omarespejel", "html_url": "https://github.com/omarespejel", "followers_url": "https://api.github.com/users/omarespejel/followers", "following_url": "https://api.github.com/users/omarespejel/following{/other_user}", "gists_url": "https://api.github.com/users/omarespejel/gists{/gist_id}", "starred_url": "https://api.github.com/users/omarespejel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/omarespejel/subscriptions", "organizations_url": "https://api.github.com/users/omarespejel/orgs", "repos_url": "https://api.github.com/users/omarespejel/repos", "events_url": "https://api.github.com/users/omarespejel/events{/privacy}", "received_events_url": "https://api.github.com/users/omarespejel/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" } ]
open
false
null
[]
null
0
"2022-07-06T19:49:18"
"2022-07-06T19:49:18"
null
NONE
null
## Adding a Dataset - **Name:** *Reddit comments (2015-2018)* - **Description:** *Reddit is an American social news aggregation website, where users can post links, and take part in discussions on these posts. These threaded discussions provide a large corpus, which is converted into a conversational dataset using the tools in this directory.* - **Paper:** *https://arxiv.org/abs/1904.06472* - **Data:** *https://github.com/PolyAI-LDN/conversational-datasets/tree/master/reddit* - **Motivation:** *Dataset for training and evaluating models of conversational response*
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4647/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4647/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4642
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4642/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4642/comments
https://api.github.com/repos/huggingface/datasets/issues/4642/events
https://github.com/huggingface/datasets/issues/4642
1,295,748,083
I_kwDODunzps5NO4vz
4,642
Streaming issue for ccdv/pubmed-summarization
{ "login": "lewtun", "id": 26859204, "node_id": "MDQ6VXNlcjI2ODU5MjA0", "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lewtun", "html_url": "https://github.com/lewtun", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "organizations_url": "https://api.github.com/users/lewtun/orgs", "repos_url": "https://api.github.com/users/lewtun/repos", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "received_events_url": "https://api.github.com/users/lewtun/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
3
"2022-07-06T12:13:07"
"2022-07-06T14:17:34"
"2022-07-06T14:17:34"
MEMBER
null
### Link https://huggingface.co/datasets/ccdv/pubmed-summarization ### Description This was reported by a [user of AutoTrain Evaluate](https://huggingface.co/spaces/autoevaluate/model-evaluator/discussions/7). It seems like streaming doesn't work due to the way the dataset loading script is defined? ``` Status code: 400 Exception: FileNotFoundError Message: https://huggingface.co/datasets/ccdv/pubmed-summarization/resolve/main/train.zip/train.txt ``` ### Owner No
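A minimal reproduction sketch of the failure, assuming the `train.zip/train.txt` URL in the error means the loading script joins plain file paths into an extracted zip (which only works in non-streaming mode); the dataset name is from the report, everything else is illustrative:

```python
from datasets import load_dataset

# non-streaming: the zip is downloaded and extracted locally, so plain paths resolve
ds = load_dataset("ccdv/pubmed-summarization", split="train")

# streaming: the same path is turned into a URL like
# .../resolve/main/train.zip/train.txt, which does not exist on the Hub
ds_stream = load_dataset("ccdv/pubmed-summarization", split="train", streaming=True)
next(iter(ds_stream))  # raised FileNotFoundError at the time of this report
```

Loading scripts usually become streamable by iterating over archive members (e.g. via `dl_manager.iter_archive`) instead of concatenating paths into an archive.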
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4642/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4642/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4641
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4641/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4641/comments
https://api.github.com/repos/huggingface/datasets/issues/4641/events
https://github.com/huggingface/datasets/issues/4641
1,295,633,250
I_kwDODunzps5NOcti
4,641
Dataset Viewer issue for kmfoda/booksum
{ "login": "lewtun", "id": 26859204, "node_id": "MDQ6VXNlcjI2ODU5MjA0", "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lewtun", "html_url": "https://github.com/lewtun", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "organizations_url": "https://api.github.com/users/lewtun/orgs", "repos_url": "https://api.github.com/users/lewtun/repos", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "received_events_url": "https://api.github.com/users/lewtun/received_events", "type": "User", "site_admin": false }
[ { "id": 3470211881, "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer", "name": "dataset-viewer", "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
3
"2022-07-06T10:38:16"
"2022-07-06T13:25:28"
"2022-07-06T11:58:06"
MEMBER
null
### Link https://huggingface.co/datasets/kmfoda/booksum ### Description A [user of AutoTrain Evaluate](https://huggingface.co/spaces/autoevaluate/model-evaluator/discussions/9) discovered this dataset cannot be streamed due to: ``` Status code: 400 Exception: ClientResponseError Message: 401, message='Unauthorized', url=URL('https://huggingface.co/datasets/kmfoda/booksum/resolve/47953f583d6967f086cb16a2f4d2346e9834024d/test.csv') ``` I'm not sure why it says "Unauthorized" since it's just a bunch of CSV files in a repo. ### Owner No
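If the 401 comes from the files being fetched without credentials, a sketch of the usual workaround is forwarding the Hub token explicitly; `use_auth_token` is the argument name in datasets 2.x and assumes you have run `huggingface-cli login` beforehand:

```python
from datasets import load_dataset

# stream the test split, forwarding the locally stored Hub token
ds = load_dataset("kmfoda/booksum", split="test", streaming=True, use_auth_token=True)
print(next(iter(ds)))
```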
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4641/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4641/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4639
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4639/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4639/comments
https://api.github.com/repos/huggingface/datasets/issues/4639/events
https://github.com/huggingface/datasets/issues/4639
1,295,367,322
I_kwDODunzps5NNbya
4,639
Add HaGRID -- HAnd Gesture Recognition Image Dataset
{ "login": "osanseviero", "id": 7246357, "node_id": "MDQ6VXNlcjcyNDYzNTc=", "avatar_url": "https://avatars.githubusercontent.com/u/7246357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/osanseviero", "html_url": "https://github.com/osanseviero", "followers_url": "https://api.github.com/users/osanseviero/followers", "following_url": "https://api.github.com/users/osanseviero/following{/other_user}", "gists_url": "https://api.github.com/users/osanseviero/gists{/gist_id}", "starred_url": "https://api.github.com/users/osanseviero/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/osanseviero/subscriptions", "organizations_url": "https://api.github.com/users/osanseviero/orgs", "repos_url": "https://api.github.com/users/osanseviero/repos", "events_url": "https://api.github.com/users/osanseviero/events{/privacy}", "received_events_url": "https://api.github.com/users/osanseviero/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" } ]
open
false
null
[]
null
0
"2022-07-06T07:41:32"
"2022-07-06T07:41:32"
null
MEMBER
null
## Adding a Dataset - **Name:** HaGRID -- HAnd Gesture Recognition Image Dataset - **Description:** We introduce a large image dataset HaGRID (HAnd Gesture Recognition Image Dataset) for hand gesture recognition (HGR) systems. You can use it for image classification or image detection tasks. The proposed dataset allows building HGR systems, which can be used in video conferencing services (Zoom, Skype, Discord, Jazz, etc.), home automation systems, the automotive sector, etc. - **Paper:** https://arxiv.org/abs/2206.08219 - **Data:** https://github.com/hukenovs/hagrid Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4639/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4639/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4637
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4637/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4637/comments
https://api.github.com/repos/huggingface/datasets/issues/4637/events
https://github.com/huggingface/datasets/issues/4637
1,294,818,236
I_kwDODunzps5NLVu8
4,637
The "all" split breaks streaming
{ "login": "cakiki", "id": 3664563, "node_id": "MDQ6VXNlcjM2NjQ1NjM=", "avatar_url": "https://avatars.githubusercontent.com/u/3664563?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cakiki", "html_url": "https://github.com/cakiki", "followers_url": "https://api.github.com/users/cakiki/followers", "following_url": "https://api.github.com/users/cakiki/following{/other_user}", "gists_url": "https://api.github.com/users/cakiki/gists{/gist_id}", "starred_url": "https://api.github.com/users/cakiki/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cakiki/subscriptions", "organizations_url": "https://api.github.com/users/cakiki/orgs", "repos_url": "https://api.github.com/users/cakiki/repos", "events_url": "https://api.github.com/users/cakiki/events{/privacy}", "received_events_url": "https://api.github.com/users/cakiki/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
6
"2022-07-05T21:56:49"
"2022-07-15T13:59:30"
null
CONTRIBUTOR
null
## Describe the bug Not sure if this is a bug or just the way streaming works, but setting `streaming=True` did not work when setting `split="all"` ## Steps to reproduce the bug The following works: ```python ds = load_dataset('super_glue', 'wsc.fixed', split='all') ``` The following throws `ValueError: Bad split: all. Available splits: ['train', 'validation', 'test']`: ```python ds = load_dataset('super_glue', 'wsc.fixed', split='all', streaming=True) ``` ## Expected results An iterator over all splits. ## Actual results I had to do the following to achieve the desired result: ```python from itertools import chain ds = load_dataset('super_glue', 'wsc.fixed', streaming=True) it = chain.from_iterable(ds.values()) ``` ## Environment info - `datasets` version: 2.3.2 - Platform: Linux-4.15.0-176-generic-x86_64-with-glibc2.31 - Python version: 3.10.5 - PyArrow version: 8.0.0 - Pandas version: 1.4.3
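Another workaround sketch uses `interleave_datasets`, which also yields a single `IterableDataset`; note it alternates examples across splits and by default stops once the shortest split is exhausted, so the `chain` approach above remains the faithful equivalent of `split='all'`:

```python
from datasets import load_dataset, interleave_datasets

splits = load_dataset('super_glue', 'wsc.fixed', streaming=True)
# round-robins over train/validation/test rather than concatenating them
mixed = interleave_datasets(list(splits.values()))
print(next(iter(mixed)))
```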
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4637/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4637/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4636
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4636/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4636/comments
https://api.github.com/repos/huggingface/datasets/issues/4636/events
https://github.com/huggingface/datasets/issues/4636
1,294,547,836
I_kwDODunzps5NKTt8
4,636
Add info in docs about behavior of download_config.num_proc
{ "login": "nateraw", "id": 32437151, "node_id": "MDQ6VXNlcjMyNDM3MTUx", "avatar_url": "https://avatars.githubusercontent.com/u/32437151?v=4", "gravatar_id": "", "url": "https://api.github.com/users/nateraw", "html_url": "https://github.com/nateraw", "followers_url": "https://api.github.com/users/nateraw/followers", "following_url": "https://api.github.com/users/nateraw/following{/other_user}", "gists_url": "https://api.github.com/users/nateraw/gists{/gist_id}", "starred_url": "https://api.github.com/users/nateraw/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nateraw/subscriptions", "organizations_url": "https://api.github.com/users/nateraw/orgs", "repos_url": "https://api.github.com/users/nateraw/repos", "events_url": "https://api.github.com/users/nateraw/events{/privacy}", "received_events_url": "https://api.github.com/users/nateraw/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
0
"2022-07-05T17:01:00"
"2022-07-28T10:40:32"
"2022-07-28T10:40:32"
CONTRIBUTOR
null
**Is your feature request related to a problem? Please describe.** I went to override `download_config.num_proc` and was confused about what was happening under the hood. It would be nice to have the behavior documented a bit better so folks know what's happening when they use it. **Describe the solution you'd like** - Add note about how the default number of workers is 16. Related code: https://github.com/huggingface/datasets/blob/7bcac0a6a0fc367cc068f184fa132b8de8dfa11d/src/datasets/download/download_manager.py#L299-L302 - Add note that if the number of workers is higher than the number of files to download, it won't use multiprocessing. **Describe alternatives you've considered** maybe it would also be nice to set `num_proc` = `num_files` when `num_proc` > `num_files`. **Additional context** ...
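For context, a small usage sketch of the option under discussion ("some/dataset" is a placeholder):

```python
from datasets import load_dataset, DownloadConfig

# request 8 download workers; per the linked code, multiprocessing is only
# actually used when there are enough files to download, so for small
# datasets this setting can be a silent no-op
dl_config = DownloadConfig(num_proc=8)
ds = load_dataset("some/dataset", download_config=dl_config)
```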
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4636/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4636/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4635
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4635/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4635/comments
https://api.github.com/repos/huggingface/datasets/issues/4635/events
https://github.com/huggingface/datasets/issues/4635
1,294,475,931
I_kwDODunzps5NKCKb
4,635
Dataset Viewer issue for vadis/sv-ident
{ "login": "e-tornike", "id": 20404466, "node_id": "MDQ6VXNlcjIwNDA0NDY2", "avatar_url": "https://avatars.githubusercontent.com/u/20404466?v=4", "gravatar_id": "", "url": "https://api.github.com/users/e-tornike", "html_url": "https://github.com/e-tornike", "followers_url": "https://api.github.com/users/e-tornike/followers", "following_url": "https://api.github.com/users/e-tornike/following{/other_user}", "gists_url": "https://api.github.com/users/e-tornike/gists{/gist_id}", "starred_url": "https://api.github.com/users/e-tornike/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/e-tornike/subscriptions", "organizations_url": "https://api.github.com/users/e-tornike/orgs", "repos_url": "https://api.github.com/users/e-tornike/repos", "events_url": "https://api.github.com/users/e-tornike/events{/privacy}", "received_events_url": "https://api.github.com/users/e-tornike/received_events", "type": "User", "site_admin": false }
[ { "id": 3470211881, "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer", "name": "dataset-viewer", "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
6
"2022-07-05T15:48:13"
"2022-07-06T07:13:33"
"2022-07-06T07:12:14"
NONE
null
### Link https://huggingface.co/datasets/vadis/sv-ident/viewer/default/validation ### Description Error message when loading the validation split in the viewer: ``` Status code: 400 Exception: Status400Error Message: The split cache is empty. ``` ### Owner _No response_
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4635/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4635/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4634
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4634/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4634/comments
https://api.github.com/repos/huggingface/datasets/issues/4634/events
https://github.com/huggingface/datasets/issues/4634
1,294,405,251
I_kwDODunzps5NJw6D
4,634
Can't load the Hausa audio dataset
{ "login": "moro23", "id": 19976800, "node_id": "MDQ6VXNlcjE5OTc2ODAw", "avatar_url": "https://avatars.githubusercontent.com/u/19976800?v=4", "gravatar_id": "", "url": "https://api.github.com/users/moro23", "html_url": "https://github.com/moro23", "followers_url": "https://api.github.com/users/moro23/followers", "following_url": "https://api.github.com/users/moro23/following{/other_user}", "gists_url": "https://api.github.com/users/moro23/gists{/gist_id}", "starred_url": "https://api.github.com/users/moro23/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/moro23/subscriptions", "organizations_url": "https://api.github.com/users/moro23/orgs", "repos_url": "https://api.github.com/users/moro23/repos", "events_url": "https://api.github.com/users/moro23/events{/privacy}", "received_events_url": "https://api.github.com/users/moro23/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
1
"2022-07-05T14:47:36"
"2022-09-13T14:07:32"
"2022-09-13T14:07:32"
NONE
null
``` common_voice_train = load_dataset("common_voice", "ha", split="train+validation") ```
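A hedged sketch of a likely fix: the legacy `common_voice` script was deprecated in favor of the versioned `mozilla-foundation` Common Voice datasets on the Hub, which are gated and require accepting the license plus an auth token (the exact dataset version shown here is an assumption, not confirmed by the thread):

```python
from datasets import load_dataset

# assumes the dataset terms were accepted on the Hub and
# `huggingface-cli login` was run beforehand
common_voice_train = load_dataset(
    "mozilla-foundation/common_voice_8_0",
    "ha",
    split="train+validation",
    use_auth_token=True,
)
```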
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4634/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4634/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4632
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4632/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4632/comments
https://api.github.com/repos/huggingface/datasets/issues/4632/events
https://github.com/huggingface/datasets/issues/4632
1,294,166,880
I_kwDODunzps5NI2tg
4,632
'sort' method sorts one column only
{ "login": "shachardon", "id": 42108562, "node_id": "MDQ6VXNlcjQyMTA4NTYy", "avatar_url": "https://avatars.githubusercontent.com/u/42108562?v=4", "gravatar_id": "", "url": "https://api.github.com/users/shachardon", "html_url": "https://github.com/shachardon", "followers_url": "https://api.github.com/users/shachardon/followers", "following_url": "https://api.github.com/users/shachardon/following{/other_user}", "gists_url": "https://api.github.com/users/shachardon/gists{/gist_id}", "starred_url": "https://api.github.com/users/shachardon/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/shachardon/subscriptions", "organizations_url": "https://api.github.com/users/shachardon/orgs", "repos_url": "https://api.github.com/users/shachardon/repos", "events_url": "https://api.github.com/users/shachardon/events{/privacy}", "received_events_url": "https://api.github.com/users/shachardon/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
3
"2022-07-05T11:25:26"
"2022-07-07T12:06:32"
null
NONE
null
The 'sort' method changes the order of one column only (the one defined by the argument 'column'), thus creating a mismatch between a sample's fields. I would expect it to change the order of the samples as a whole, based on the 'column' order.
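A minimal sketch of the behavior the report expects (the toy columns are illustrative):

```python
from datasets import Dataset

ds = Dataset.from_dict({"score": [3, 1, 2], "text": ["c", "a", "b"]})
sorted_ds = ds.sort("score")

# Expected: rows are reordered as a whole, so fields stay aligned.
print(sorted_ds["score"])  # [1, 2, 3]
print(sorted_ds["text"])   # ['a', 'b', 'c'] — not the original ['c', 'a', 'b']
```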
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4632/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4632/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4629
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4629/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4629/comments
https://api.github.com/repos/huggingface/datasets/issues/4629/events
https://github.com/huggingface/datasets/issues/4629
1,293,418,800
I_kwDODunzps5NGAEw
4,629
Rename repo default branch to main
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 4296013012, "node_id": "LA_kwDODunzps8AAAABAA_01A", "url": "https://api.github.com/repos/huggingface/datasets/labels/maintenance", "name": "maintenance", "color": "d4c5f9", "default": false, "description": "Maintenance tasks" } ]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
0
"2022-07-04T17:16:10"
"2022-07-06T15:49:57"
"2022-07-06T15:49:57"
MEMBER
null
Rename repository default branch to `main` (instead of current `master`). Once renamed, users will have to manually update their local repos: - [ ] Upstream: ``` git branch -m master main git fetch upstream main git branch -u upstream/main main git remote set-head upstream -a ``` - [ ] Origin: Rename fork default branch as well at: https://github.com/USERNAME/datasets/settings/branches Then: ``` git fetch origin main git remote set-head origin -a ``` CC: @sgugger
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4629/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4629/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4626
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4626/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4626/comments
https://api.github.com/repos/huggingface/datasets/issues/4626/events
https://github.com/huggingface/datasets/issues/4626
1,293,256,269
I_kwDODunzps5NFYZN
4,626
Add non-commercial licensing info for datasets for which we removed tags
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
1
"2022-07-04T14:32:43"
"2022-07-08T14:27:29"
null
MEMBER
null
We removed several YAML tags saying that certain datasets can't be used for commercial purposes: https://github.com/huggingface/datasets/pull/4613#discussion_r911919753 The reason for this is that we only allow tags that are part of our [supported list of licenses](https://github.com/huggingface/datasets/blob/84fc3ad73c85de4eda5d152dfede7671491449cb/src/datasets/utils/resources/standard_licenses.tsv). We should update the Licensing Information section of the concerned dataset cards, now that the non-commercial tag doesn't exist anymore for certain datasets.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4626/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4626/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4623
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4623/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4623/comments
https://api.github.com/repos/huggingface/datasets/issues/4623/events
https://github.com/huggingface/datasets/issues/4623
1,293,042,894
I_kwDODunzps5NEkTO
4,623
Loading MNIST as Pytorch Dataset
{ "login": "jameschapman19", "id": 56592797, "node_id": "MDQ6VXNlcjU2NTkyNzk3", "avatar_url": "https://avatars.githubusercontent.com/u/56592797?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jameschapman19", "html_url": "https://github.com/jameschapman19", "followers_url": "https://api.github.com/users/jameschapman19/followers", "following_url": "https://api.github.com/users/jameschapman19/following{/other_user}", "gists_url": "https://api.github.com/users/jameschapman19/gists{/gist_id}", "starred_url": "https://api.github.com/users/jameschapman19/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jameschapman19/subscriptions", "organizations_url": "https://api.github.com/users/jameschapman19/orgs", "repos_url": "https://api.github.com/users/jameschapman19/repos", "events_url": "https://api.github.com/users/jameschapman19/events{/privacy}", "received_events_url": "https://api.github.com/users/jameschapman19/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
4
"2022-07-04T11:33:10"
"2022-07-04T14:40:50"
null
NONE
null
## Describe the bug Converting the MNIST dataset to PyTorch format fails with an AttributeError. ## Steps to reproduce the bug ```python from datasets import load_dataset dataset = load_dataset("mnist", split="train") dataset.set_format('torch') dataset[0] print() ``` ## Expected results Expect to see torch tensors for the image and label. ## Actual results Traceback (most recent call last): File "C:\Program Files\JetBrains\PyCharm 2020.3.3\plugins\python\helpers\pydev\pydevd.py", line 1491, in _exec pydev_imports.execfile(file, globals, locals) # execute the script File "C:\Program Files\JetBrains\PyCharm 2020.3.3\plugins\python\helpers\pydev\_pydev_imps\_pydev_execfile.py", line 18, in execfile exec(compile(contents+"\n", file, 'exec'), glob, loc) File "C:/Users/chapm/PycharmProjects/multiviewdata/multiviewdata/huggingface/mnist.py", line 13, in <module> dataset[0] File "C:\Users\chapm\PycharmProjects\multiviewdata\venv\lib\site-packages\datasets\arrow_dataset.py", line 2154, in __getitem__ return self._getitem( File "C:\Users\chapm\PycharmProjects\multiviewdata\venv\lib\site-packages\datasets\arrow_dataset.py", line 2139, in _getitem formatted_output = format_table( File "C:\Users\chapm\PycharmProjects\multiviewdata\venv\lib\site-packages\datasets\formatting\formatting.py", line 532, in format_table return formatter(pa_table, query_type=query_type) File "C:\Users\chapm\PycharmProjects\multiviewdata\venv\lib\site-packages\datasets\formatting\formatting.py", line 281, in __call__ return self.format_row(pa_table) File "C:\Users\chapm\PycharmProjects\multiviewdata\venv\lib\site-packages\datasets\formatting\torch_formatter.py", line 58, in format_row return self.recursive_tensorize(row) File "C:\Users\chapm\PycharmProjects\multiviewdata\venv\lib\site-packages\datasets\formatting\torch_formatter.py", line 54, in recursive_tensorize return map_nested(self._recursive_tensorize, data_struct, map_list=False) File "C:\Users\chapm\PycharmProjects\multiviewdata\venv\lib\site-packages\datasets\utils\py_utils.py", line 356, in map_nested mapped = [ File "C:\Users\chapm\PycharmProjects\multiviewdata\venv\lib\site-packages\datasets\utils\py_utils.py", line 357, in <listcomp> _single_map_nested((function, obj, types, None, True, None)) File "C:\Users\chapm\PycharmProjects\multiviewdata\venv\lib\site-packages\datasets\utils\py_utils.py", line 309, in _single_map_nested return {k: _single_map_nested((function, v, types, None, True, None)) for k, v in pbar} File "C:\Users\chapm\PycharmProjects\multiviewdata\venv\lib\site-packages\datasets\utils\py_utils.py", line 309, in <dictcomp> return {k: _single_map_nested((function, v, types, None, True, None)) for k, v in pbar} File "C:\Users\chapm\PycharmProjects\multiviewdata\venv\lib\site-packages\datasets\utils\py_utils.py", line 293, in _single_map_nested return function(data_struct) File "C:\Users\chapm\PycharmProjects\multiviewdata\venv\lib\site-packages\datasets\formatting\torch_formatter.py", line 51, in _recursive_tensorize return self._tensorize(data_struct) File "C:\Users\chapm\PycharmProjects\multiviewdata\venv\lib\site-packages\datasets\formatting\torch_formatter.py", line 38, in _tensorize if np.issubdtype(value.dtype, np.integer): AttributeError: 'bytes' object has no attribute 'dtype' python-BaseException ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.3.2 - Platform: Windows-10-10.0.22579-SP0 - Python version: 3.9.2 - PyArrow version: 8.0.0 - Pandas version: 1.4.1
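Until the torch formatter handles this case, one possible workaround (a sketch, not an official fix; it assumes the `image` column decodes to PIL images) is to tensorize in an on-the-fly transform instead of using `set_format('torch')`:

```python
import numpy as np
import torch
from datasets import load_dataset

dataset = load_dataset("mnist", split="train")

def to_tensors(batch):
    # Tensorize the decoded PIL images manually, bypassing the torch
    # formatter that trips over the raw image bytes.
    batch["image"] = [torch.from_numpy(np.array(img)) for img in batch["image"]]
    batch["label"] = [torch.tensor(label) for label in batch["label"]]
    return batch

dataset = dataset.with_transform(to_tensors)
print(dataset[0]["image"].shape)  # torch.Size([28, 28])
```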
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4623/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4623/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4621
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4621/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4621/comments
https://api.github.com/repos/huggingface/datasets/issues/4621/events
https://github.com/huggingface/datasets/issues/4621
1,293,030,128
I_kwDODunzps5NEhLw
4,621
ImageFolder raises an error with parameters drop_metadata=True and drop_labels=False when metadata.jsonl is present
{ "login": "polinaeterna", "id": 16348744, "node_id": "MDQ6VXNlcjE2MzQ4NzQ0", "avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4", "gravatar_id": "", "url": "https://api.github.com/users/polinaeterna", "html_url": "https://github.com/polinaeterna", "followers_url": "https://api.github.com/users/polinaeterna/followers", "following_url": "https://api.github.com/users/polinaeterna/following{/other_user}", "gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}", "starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions", "organizations_url": "https://api.github.com/users/polinaeterna/orgs", "repos_url": "https://api.github.com/users/polinaeterna/repos", "events_url": "https://api.github.com/users/polinaeterna/events{/privacy}", "received_events_url": "https://api.github.com/users/polinaeterna/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "polinaeterna", "id": 16348744, "node_id": "MDQ6VXNlcjE2MzQ4NzQ0", "avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4", "gravatar_id": "", "url": "https://api.github.com/users/polinaeterna", "html_url": "https://github.com/polinaeterna", "followers_url": "https://api.github.com/users/polinaeterna/followers", "following_url": "https://api.github.com/users/polinaeterna/following{/other_user}", "gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}", "starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions", "organizations_url": "https://api.github.com/users/polinaeterna/orgs", "repos_url": "https://api.github.com/users/polinaeterna/repos", "events_url": "https://api.github.com/users/polinaeterna/events{/privacy}", "received_events_url": "https://api.github.com/users/polinaeterna/received_events", "type": "User", "site_admin": false }
[ { "login": "polinaeterna", "id": 16348744, "node_id": "MDQ6VXNlcjE2MzQ4NzQ0", "avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4", "gravatar_id": "", "url": "https://api.github.com/users/polinaeterna", "html_url": "https://github.com/polinaeterna", "followers_url": "https://api.github.com/users/polinaeterna/followers", "following_url": "https://api.github.com/users/polinaeterna/following{/other_user}", "gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}", "starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions", "organizations_url": "https://api.github.com/users/polinaeterna/orgs", "repos_url": "https://api.github.com/users/polinaeterna/repos", "events_url": "https://api.github.com/users/polinaeterna/events{/privacy}", "received_events_url": "https://api.github.com/users/polinaeterna/received_events", "type": "User", "site_admin": false } ]
null
0
"2022-07-04T11:21:44"
"2022-07-15T14:24:24"
"2022-07-15T14:24:24"
CONTRIBUTOR
null
## Describe the bug If you pass `drop_metadata=True` and `drop_labels=False` when a `data_dir` contains at least one `metadata.jsonl` file, you will get a KeyError. This is probably not a very useful case but we shouldn't get an error anyway. Asking users to move metadata files manually outside `data_dir` or pass features manually (when there is a tool that can infer them automatically) doesn't look like a good idea to me either. ## Steps to reproduce the bug ### Clone an example dataset from the Hub ```bash git clone https://huggingface.co/datasets/nateraw/test-imagefolder-metadata ``` ### Try to load it ```python from datasets import load_dataset ds = load_dataset("test-imagefolder-metadata", drop_metadata=True, drop_labels=False) ``` or even just ```python ds = load_dataset("test-imagefolder-metadata", drop_metadata=True) ``` as `drop_labels=False` is a default value. ## Expected results A DatasetDict object with two features: `"image"` and `"label"`. ## Actual results ``` Traceback (most recent call last): File "/home/polina/workspace/datasets/debug.py", line 18, in <module> ds = load_dataset( File "/home/polina/workspace/datasets/src/datasets/load.py", line 1732, in load_dataset builder_instance.download_and_prepare( File "/home/polina/workspace/datasets/src/datasets/builder.py", line 704, in download_and_prepare self._download_and_prepare( File "/home/polina/workspace/datasets/src/datasets/builder.py", line 1227, in _download_and_prepare super()._download_and_prepare(dl_manager, verify_infos, check_duplicate_keys=verify_infos) File "/home/polina/workspace/datasets/src/datasets/builder.py", line 793, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/home/polina/workspace/datasets/src/datasets/builder.py", line 1218, in _prepare_split example = self.info.features.encode_example(record) File "/home/polina/workspace/datasets/src/datasets/features/features.py", line 1596, in encode_example return encode_nested_example(self, example) File "/home/polina/workspace/datasets/src/datasets/features/features.py", line 1165, in encode_nested_example { File "/home/polina/workspace/datasets/src/datasets/features/features.py", line 1165, in <dictcomp> { File "/home/polina/workspace/datasets/src/datasets/utils/py_utils.py", line 249, in zip_dict yield key, tuple(d[key] for d in dicts) File "/home/polina/workspace/datasets/src/datasets/utils/py_utils.py", line 249, in <genexpr> yield key, tuple(d[key] for d in dicts) KeyError: 'label' ``` ## Environment info `datasets` master branch - `datasets` version: 2.3.3.dev0 - Platform: Linux-5.14.0-1042-oem-x86_64-with-glibc2.17 - Python version: 3.8.12 - PyArrow version: 6.0.1 - Pandas version: 1.4.1
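Until this is fixed, two sketches that avoid the failing flag combination (paths and flags as in the repro above; whether `drop_metadata=True, drop_labels=True` is accepted on this version is an assumption):

```python
from datasets import load_dataset

# Either keep the metadata (the default when metadata.jsonl is present)...
ds_with_metadata = load_dataset("test-imagefolder-metadata")

# ...or drop the labels together with the metadata, which sidesteps the
# KeyError raised by drop_metadata=True combined with drop_labels=False.
ds_images_only = load_dataset("test-imagefolder-metadata", drop_metadata=True, drop_labels=True)
```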
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4621/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/huggingface/datasets/issues/4621/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4620
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4620/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4620/comments
https://api.github.com/repos/huggingface/datasets/issues/4620/events
https://github.com/huggingface/datasets/issues/4620
1,292,797,878
I_kwDODunzps5NDoe2
4,620
Data type is not recognized when using datetime.time
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[ { "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false } ]
null
2
"2022-07-04T08:13:38"
"2022-07-07T13:57:11"
"2022-07-07T13:57:11"
CONTRIBUTOR
null
## Describe the bug Creating a dataset from a pandas dataframe with `datetime.time` format generates an error. ## Steps to reproduce the bug ```python import pandas as pd from datetime import time from datasets import Dataset df = pd.DataFrame({"feature_name": [time(1, 1, 1)]}) dataset = Dataset.from_pandas(df) ``` ## Expected results The dataset should be created. ## Actual results ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/slesage/hf/datasets-server/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_dataset.py", line 823, in from_pandas return cls(table, info=info, split=split) File "/home/slesage/hf/datasets-server/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_dataset.py", line 679, in __init__ inferred_features = Features.from_arrow_schema(arrow_table.schema) File "/home/slesage/hf/datasets-server/services/worker/.venv/lib/python3.9/site-packages/datasets/features/features.py", line 1551, in from_arrow_schema obj = {field.name: generate_from_arrow_type(field.type) for field in pa_schema} File "/home/slesage/hf/datasets-server/services/worker/.venv/lib/python3.9/site-packages/datasets/features/features.py", line 1551, in <dictcomp> obj = {field.name: generate_from_arrow_type(field.type) for field in pa_schema} File "/home/slesage/hf/datasets-server/services/worker/.venv/lib/python3.9/site-packages/datasets/features/features.py", line 1315, in generate_from_arrow_type return Value(dtype=_arrow_to_datasets_dtype(pa_type)) File "/home/slesage/hf/datasets-server/services/worker/.venv/lib/python3.9/site-packages/datasets/features/features.py", line 83, in _arrow_to_datasets_dtype return f"time64[{arrow_type.unit}]" AttributeError: 'pyarrow.lib.DataType' object has no attribute 'unit' ``` ## Environment info - `datasets` version: 2.3.3.dev0 - Platform: Linux-5.13.0-1031-aws-x86_64-with-glibc2.31 - Python version: 3.9.6 - PyArrow version: 7.0.0 - Pandas version: 1.4.2
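A workaround sketch until `time64` is mapped to a feature type: serialize the `datetime.time` values (to ISO strings here) before calling `from_pandas`:

```python
import pandas as pd
from datetime import time
from datasets import Dataset

df = pd.DataFrame({"feature_name": [time(1, 1, 1)]})

# Serialize the time objects so pyarrow infers a plain string column
# instead of a time64 type that `datasets` cannot map to a feature yet.
df["feature_name"] = df["feature_name"].map(lambda t: t.isoformat())

dataset = Dataset.from_pandas(df)
print(dataset[0])  # {'feature_name': '01:01:01'}
```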
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4620/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4620/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4619
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4619/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4619/comments
https://api.github.com/repos/huggingface/datasets/issues/4619/events
https://github.com/huggingface/datasets/issues/4619
1,292,107,275
I_kwDODunzps5NA_4L
4,619
np arrays get turned into native lists
{ "login": "ZhaofengWu", "id": 11954789, "node_id": "MDQ6VXNlcjExOTU0Nzg5", "avatar_url": "https://avatars.githubusercontent.com/u/11954789?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ZhaofengWu", "html_url": "https://github.com/ZhaofengWu", "followers_url": "https://api.github.com/users/ZhaofengWu/followers", "following_url": "https://api.github.com/users/ZhaofengWu/following{/other_user}", "gists_url": "https://api.github.com/users/ZhaofengWu/gists{/gist_id}", "starred_url": "https://api.github.com/users/ZhaofengWu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ZhaofengWu/subscriptions", "organizations_url": "https://api.github.com/users/ZhaofengWu/orgs", "repos_url": "https://api.github.com/users/ZhaofengWu/repos", "events_url": "https://api.github.com/users/ZhaofengWu/events{/privacy}", "received_events_url": "https://api.github.com/users/ZhaofengWu/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
3
"2022-07-02T17:54:57"
"2022-07-03T20:27:07"
null
NONE
null
## Describe the bug When attaching an `np.array` field, it seems that it automatically gets turned into a list (see below). Why is this happening? Could it lose precision? Is there a way to make sure this doesn't happen? ## Steps to reproduce the bug ```python >>> import datasets, numpy as np >>> dataset = datasets.load_dataset("glue", "mrpc")["validation"] Reusing dataset glue (...) 100%|███████████████████████████████████████████████| 3/3 [00:00<00:00, 1360.61it/s] >>> dataset2 = dataset.map(lambda x: {"tmp": np.array([0.5])}, batched=False) 100%|██████████████████████████████████████████| 408/408 [00:00<00:00, 10819.97ex/s] >>> dataset2[0]["tmp"] [0.5] >>> type(dataset2[0]["tmp"]) <class 'list'> ``` ## Expected results `dataset2[0]["tmp"]` should be an `np.ndarray`. ## Actual results It's a list. ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.3.2 - Platform: mac, though I'm pretty sure it happens on a linux machine too - Python version: 3.9.7 - PyArrow version: 6.0.1
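Values produced in `map` are stored as Arrow data, so the Python-side type is not preserved; a sketch of getting ndarrays back out through the output format (precision is kept, since the values live in Arrow as float64):

```python
import numpy as np
from datasets import load_dataset

dataset = load_dataset("glue", "mrpc")["validation"]
dataset2 = dataset.map(lambda x: {"tmp": np.array([0.5])})

# Request NumPy outputs for the stored column instead of plain lists.
dataset2.set_format("numpy", columns=["tmp"], output_all_columns=True)
print(type(dataset2[0]["tmp"]))  # <class 'numpy.ndarray'>
```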
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4619/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4619/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4618
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4618/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4618/comments
https://api.github.com/repos/huggingface/datasets/issues/4618/events
https://github.com/huggingface/datasets/issues/4618
1,292,078,225
I_kwDODunzps5NA4yR
4,618
contribute data loading for object detection datasets with yolo data format
{ "login": "faizankshaikh", "id": 8406903, "node_id": "MDQ6VXNlcjg0MDY5MDM=", "avatar_url": "https://avatars.githubusercontent.com/u/8406903?v=4", "gravatar_id": "", "url": "https://api.github.com/users/faizankshaikh", "html_url": "https://github.com/faizankshaikh", "followers_url": "https://api.github.com/users/faizankshaikh/followers", "following_url": "https://api.github.com/users/faizankshaikh/following{/other_user}", "gists_url": "https://api.github.com/users/faizankshaikh/gists{/gist_id}", "starred_url": "https://api.github.com/users/faizankshaikh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/faizankshaikh/subscriptions", "organizations_url": "https://api.github.com/users/faizankshaikh/orgs", "repos_url": "https://api.github.com/users/faizankshaikh/repos", "events_url": "https://api.github.com/users/faizankshaikh/events{/privacy}", "received_events_url": "https://api.github.com/users/faizankshaikh/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
4
"2022-07-02T15:21:59"
"2022-07-21T14:10:44"
null
NONE
null
**Is your feature request related to a problem? Please describe.** At the moment, HF datasets loads [image classification datasets](https://huggingface.co/docs/datasets/image_process) out-of-the-box. There could be a data loader for loading standard object detection datasets ([original discussion here](https://huggingface.co/datasets/jalFaizy/detect_chess_pieces/discussions/2)). **Describe the solution you'd like** I wrote a [custom script](https://huggingface.co/datasets/jalFaizy/detect_chess_pieces/blob/main/detect_chess_pieces.py) to load a dataset in the YOLO data format. **Describe alternatives you've considered** The script can either be a standalone dataset builder, or a modified version of `ImageFolder`. **Additional context** I would be happy to contribute to this, but I would do it at a very slow pace (maybe a month or two) as I have my exams approaching 😄
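For reference, a YOLO label file has one `class_id x_center y_center width height` line per bounding box, with coordinates normalized to [0, 1]; a minimal parsing sketch (the field names in the output records are my own):

```python
from pathlib import Path

def parse_yolo_labels(label_path):
    """Parse one YOLO .txt label file into a list of box records."""
    boxes = []
    for line in Path(label_path).read_text().splitlines():
        class_id, x_center, y_center, width, height = line.split()
        boxes.append({
            "class_id": int(class_id),
            # Normalized [0, 1] coordinates relative to the image size.
            "bbox": [float(x_center), float(y_center), float(width), float(height)],
        })
    return boxes
```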
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4618/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4618/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4612
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4612/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4612/comments
https://api.github.com/repos/huggingface/datasets/issues/4612/events
https://github.com/huggingface/datasets/issues/4612
1,290,984,660
I_kwDODunzps5M8tzU
4,612
Release 2.3.0 broke custom iterable datasets
{ "login": "aapot", "id": 19529125, "node_id": "MDQ6VXNlcjE5NTI5MTI1", "avatar_url": "https://avatars.githubusercontent.com/u/19529125?v=4", "gravatar_id": "", "url": "https://api.github.com/users/aapot", "html_url": "https://github.com/aapot", "followers_url": "https://api.github.com/users/aapot/followers", "following_url": "https://api.github.com/users/aapot/following{/other_user}", "gists_url": "https://api.github.com/users/aapot/gists{/gist_id}", "starred_url": "https://api.github.com/users/aapot/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/aapot/subscriptions", "organizations_url": "https://api.github.com/users/aapot/orgs", "repos_url": "https://api.github.com/users/aapot/repos", "events_url": "https://api.github.com/users/aapot/events{/privacy}", "received_events_url": "https://api.github.com/users/aapot/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
3
"2022-07-01T06:46:07"
"2022-07-05T15:08:21"
"2022-07-05T15:08:21"
NONE
null
## Describe the bug Trying to iterate examples from a custom iterable dataset fails due to a bug introduced in `torch_iterable_dataset.py` in the 2.3.0 release. ## Steps to reproduce the bug ```python next(iter(custom_iterable_dataset)) ``` ## Expected results `next(iter(custom_iterable_dataset))` should return examples from the dataset ## Actual results ``` /usr/local/lib/python3.7/dist-packages/datasets/formatting/dataset_wrappers/torch_iterable_dataset.py in _set_fsspec_for_multiprocess() 16 See https://github.com/fsspec/gcsfs/issues/379 17 """ ---> 18 fsspec.asyn.iothread[0] = None 19 fsspec.asyn.loop[0] = None 20 AttributeError: module 'fsspec' has no attribute 'asyn' ``` ## Environment info - `datasets` version: 2.3.0 - Platform: Linux-5.4.188+-x86_64-with-Ubuntu-18.04-bionic - Python version: 3.7.13 - PyArrow version: 8.0.0 - Pandas version: 1.3.5
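The traceback suggests the `fsspec.asyn` submodule was simply never imported; importing it explicitly before iterating is a stopgap sketch (my assumption, not the official fix; a streaming dataset stands in for the custom one here):

```python
import fsspec.asyn  # noqa: F401 — imported for its side effect only

from datasets import load_dataset

# With the submodule loaded, fsspec.asyn.iothread and fsspec.asyn.loop
# exist, so the multiprocessing setup in datasets no longer fails.
ds = load_dataset("glue", "mrpc", split="validation", streaming=True)
print(next(iter(ds)))
```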
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4612/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4612/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4610
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4610/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4610/comments
https://api.github.com/repos/huggingface/datasets/issues/4610/events
https://github.com/huggingface/datasets/issues/4610
1,290,603,827
I_kwDODunzps5M7Q0z
4,610
codeparrot/github-code failing to load
{ "login": "PyDataBlog", "id": 29863388, "node_id": "MDQ6VXNlcjI5ODYzMzg4", "avatar_url": "https://avatars.githubusercontent.com/u/29863388?v=4", "gravatar_id": "", "url": "https://api.github.com/users/PyDataBlog", "html_url": "https://github.com/PyDataBlog", "followers_url": "https://api.github.com/users/PyDataBlog/followers", "following_url": "https://api.github.com/users/PyDataBlog/following{/other_user}", "gists_url": "https://api.github.com/users/PyDataBlog/gists{/gist_id}", "starred_url": "https://api.github.com/users/PyDataBlog/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/PyDataBlog/subscriptions", "organizations_url": "https://api.github.com/users/PyDataBlog/orgs", "repos_url": "https://api.github.com/users/PyDataBlog/repos", "events_url": "https://api.github.com/users/PyDataBlog/events{/privacy}", "received_events_url": "https://api.github.com/users/PyDataBlog/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
8
"2022-06-30T20:24:48"
"2022-07-05T14:24:13"
"2022-07-05T09:19:56"
NONE
null
## Describe the bug codeparrot/github-code fails to load with a `TypeError: get_patterns_in_dataset_repository() missing 1 required positional argument: 'base_path'` ## Steps to reproduce the bug ```python from datasets import load_dataset dataset = load_dataset("codeparrot/github-code") ``` ## Expected results loaded dataset object ## Actual results ```python [3]: dataset = load_dataset("codeparrot/github-code") No config specified, defaulting to: github-code/all-all Downloading and preparing dataset github-code/all-all to /home/bebr/.cache/huggingface/datasets/codeparrot___github-code/all-all/0.0.0/a55513bc0f81db773f9896c7aac225af0cff5b323bb9d2f68124f0a8cc3fb817... --------------------------------------------------------------------------- TypeError Traceback (most recent call last) Input In [3], in <cell line: 1>() ----> 1 dataset = load_dataset("codeparrot/github-code") File ~/miniconda3/envs/fastapi-kube/lib/python3.10/site-packages/datasets/load.py:1679, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, **config_kwargs) 1676 try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES 1678 # Download and prepare data -> 1679 builder_instance.download_and_prepare( 1680 download_config=download_config, 1681 download_mode=download_mode, 1682 ignore_verifications=ignore_verifications, 1683 try_from_hf_gcs=try_from_hf_gcs, 1684 use_auth_token=use_auth_token, 1685 ) 1687 # Build dataset for splits 1688 keep_in_memory = ( 1689 keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size) 1690 ) File ~/miniconda3/envs/fastapi-kube/lib/python3.10/site-packages/datasets/builder.py:704, in DatasetBuilder.download_and_prepare(self, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, **download_and_prepare_kwargs) 702 logger.warning("HF google storage unreachable. Downloading and preparing it from source") 703 if not downloaded_from_gcs: --> 704 self._download_and_prepare( 705 dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs 706 ) 707 # Sync info 708 self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values()) File ~/miniconda3/envs/fastapi-kube/lib/python3.10/site-packages/datasets/builder.py:1221, in GeneratorBasedBuilder._download_and_prepare(self, dl_manager, verify_infos) 1220 def _download_and_prepare(self, dl_manager, verify_infos): -> 1221 super()._download_and_prepare(dl_manager, verify_infos, check_duplicate_keys=verify_infos) File ~/miniconda3/envs/fastapi-kube/lib/python3.10/site-packages/datasets/builder.py:771, in DatasetBuilder._download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs) 769 split_dict = SplitDict(dataset_name=self.name) 770 split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs) --> 771 split_generators = self._split_generators(dl_manager, **split_generators_kwargs) 773 # Checksums verification 774 if verify_infos and dl_manager.record_checksums: File ~/.cache/huggingface/modules/datasets_modules/datasets/codeparrot--github-code/a55513bc0f81db773f9896c7aac225af0cff5b323bb9d2f68124f0a8cc3fb817/github-code.py:169, in GithubCode._split_generators(self, dl_manager) 162 def _split_generators(self, dl_manager): 164 hfh_dataset_info = HfApi(datasets.config.HF_ENDPOINT).dataset_info( 165 _REPO_NAME, 166 timeout=100.0, 167 ) --> 169 patterns = datasets.data_files.get_patterns_in_dataset_repository(hfh_dataset_info) 170 data_files = datasets.data_files.DataFilesDict.from_hf_repo( 171 patterns, 172 dataset_info=hfh_dataset_info, 173 ) 175 files = dl_manager.download_and_extract(data_files["train"]) TypeError: get_patterns_in_dataset_repository() missing 1 required positional argument: 'base_path' ``` ## Environment info - `datasets` version: 2.3.2 - Platform: Linux-5.18.7-arch1-1-x86_64-with-glibc2.35 - Python version: 3.10.5 - PyArrow version: 8.0.0 - Pandas version: 1.4.2
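For context, the error says the helper now requires a `base_path` argument, so the script-side fix presumably looks like the sketch below; passing `base_path=""` to resolve patterns against the repository root is my assumption, not the verified patch:

```python
import datasets
from huggingface_hub import HfApi

hfh_dataset_info = HfApi(datasets.config.HF_ENDPOINT).dataset_info(
    "codeparrot/github-code", timeout=100.0
)

# datasets>=2.3 made `base_path` a required argument of this helper; an
# empty string points pattern resolution at the repository root.
patterns = datasets.data_files.get_patterns_in_dataset_repository(
    hfh_dataset_info, base_path=""
)
```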
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4610/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4610/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4609
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4609/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4609/comments
https://api.github.com/repos/huggingface/datasets/issues/4609/events
https://github.com/huggingface/datasets/issues/4609
1,290,392,083
I_kwDODunzps5M6dIT
4,609
librispeech dataset has to download the whole subset when specifying the split to use
{ "login": "sunhaozhepy", "id": 73462159, "node_id": "MDQ6VXNlcjczNDYyMTU5", "avatar_url": "https://avatars.githubusercontent.com/u/73462159?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sunhaozhepy", "html_url": "https://github.com/sunhaozhepy", "followers_url": "https://api.github.com/users/sunhaozhepy/followers", "following_url": "https://api.github.com/users/sunhaozhepy/following{/other_user}", "gists_url": "https://api.github.com/users/sunhaozhepy/gists{/gist_id}", "starred_url": "https://api.github.com/users/sunhaozhepy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sunhaozhepy/subscriptions", "organizations_url": "https://api.github.com/users/sunhaozhepy/orgs", "repos_url": "https://api.github.com/users/sunhaozhepy/repos", "events_url": "https://api.github.com/users/sunhaozhepy/events{/privacy}", "received_events_url": "https://api.github.com/users/sunhaozhepy/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
2
"2022-06-30T16:38:24"
"2022-07-12T21:44:32"
"2022-07-12T21:44:32"
NONE
null
## Describe the bug The librispeech dataset has to download the whole subset even when only one split is specified. ## Steps to reproduce the bug see below # Sample code to reproduce the bug ``` !pip install datasets from datasets import load_dataset raw_dataset = load_dataset("librispeech_asr", "clean", split="train.100") ``` ## Expected results Only the split "train.clean.100" is downloaded. ## Actual results All four splits in the "clean" subset are downloaded. ## Environment info - `datasets` version: 2.3.2 - Platform: Linux-5.4.188+-x86_64-with-Ubuntu-18.04-bionic - Python version: 3.7.13 - PyArrow version: 6.0.1 - Pandas version: 1.3.5
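A workaround sketch that avoids downloading the other splits entirely: stream the one split you need, so files are fetched lazily instead of being prepared up front (assuming this dataset supports streaming):

```python
from datasets import load_dataset

# Streaming yields examples on the fly, so only the requested split's
# files are read instead of downloading the whole "clean" subset.
raw_dataset = load_dataset("librispeech_asr", "clean", split="train.100", streaming=True)
print(next(iter(raw_dataset)))
```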
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4609/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4609/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4606
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4606/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4606/comments
https://api.github.com/repos/huggingface/datasets/issues/4606/events
https://github.com/huggingface/datasets/issues/4606
1,290,083,534
I_kwDODunzps5M5RzO
4,606
evaluation result changes after `datasets` version change
{ "login": "thnkinbtfly", "id": 70014488, "node_id": "MDQ6VXNlcjcwMDE0NDg4", "avatar_url": "https://avatars.githubusercontent.com/u/70014488?v=4", "gravatar_id": "", "url": "https://api.github.com/users/thnkinbtfly", "html_url": "https://github.com/thnkinbtfly", "followers_url": "https://api.github.com/users/thnkinbtfly/followers", "following_url": "https://api.github.com/users/thnkinbtfly/following{/other_user}", "gists_url": "https://api.github.com/users/thnkinbtfly/gists{/gist_id}", "starred_url": "https://api.github.com/users/thnkinbtfly/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thnkinbtfly/subscriptions", "organizations_url": "https://api.github.com/users/thnkinbtfly/orgs", "repos_url": "https://api.github.com/users/thnkinbtfly/repos", "events_url": "https://api.github.com/users/thnkinbtfly/events{/privacy}", "received_events_url": "https://api.github.com/users/thnkinbtfly/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
1
"2022-06-30T12:43:26"
"2022-07-04T17:47:32"
null
NONE
null
## Describe the bug The evaluation result changes after a `datasets` version change. ## Steps to reproduce the bug 1. Train a model on WikiAnn 2. Reload the ckpt -> test accuracy becomes the same as eval accuracy 3. Such behavior is gone after downgrading `datasets` https://colab.research.google.com/drive/1kYz7-aZRGdayaq-gDTt30tyEgsKlpYOw?usp=sharing ## Expected results The evaluation result shouldn't change before/after a `datasets` version change. ## Actual results The evaluation result changes before/after a `datasets` version change. ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.3.2 - Platform: colab - Python version: 3.7.13 - PyArrow version: 6.0.1 Q. How could the evaluation result change before/after `datasets` version changes?
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4606/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4606/timeline
null
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4605
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4605/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4605/comments
https://api.github.com/repos/huggingface/datasets/issues/4605/events
https://github.com/huggingface/datasets/issues/4605
1,290,058,970
I_kwDODunzps5M5Lza
4,605
Dataset Viewer issue for boris/gis_filtered
{ "login": "WaterKnight1998", "id": 41203448, "node_id": "MDQ6VXNlcjQxMjAzNDQ4", "avatar_url": "https://avatars.githubusercontent.com/u/41203448?v=4", "gravatar_id": "", "url": "https://api.github.com/users/WaterKnight1998", "html_url": "https://github.com/WaterKnight1998", "followers_url": "https://api.github.com/users/WaterKnight1998/followers", "following_url": "https://api.github.com/users/WaterKnight1998/following{/other_user}", "gists_url": "https://api.github.com/users/WaterKnight1998/gists{/gist_id}", "starred_url": "https://api.github.com/users/WaterKnight1998/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/WaterKnight1998/subscriptions", "organizations_url": "https://api.github.com/users/WaterKnight1998/orgs", "repos_url": "https://api.github.com/users/WaterKnight1998/repos", "events_url": "https://api.github.com/users/WaterKnight1998/events{/privacy}", "received_events_url": "https://api.github.com/users/WaterKnight1998/received_events", "type": "User", "site_admin": false }
[ { "id": 3287858981, "node_id": "MDU6TGFiZWwzMjg3ODU4OTgx", "url": "https://api.github.com/repos/huggingface/datasets/labels/streaming", "name": "streaming", "color": "fef2c0", "default": false, "description": "" } ]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
5
"2022-06-30T12:23:34"
"2022-07-06T12:34:19"
"2022-07-06T12:34:19"
NONE
null
### Link https://huggingface.co/datasets/boris/gis_filtered/viewer/boris--gis_filtered/train ### Description When I try to access this from the website I get this error: Status code: 400 Exception: ClientResponseError Message: 401, message='Unauthorized', url=URL('https://huggingface.co/datasets/boris/gis_filtered/resolve/80b805053ce61d4eb487b6b8d9095d775c2c466e/data/train/0000.parquet') If I try to load with code I also get the same issue: ```python dataset2_train=load_dataset("boris/gis_filtered", use_auth_token=os.environ["HF_TOKEN"],split="train",streaming=True) dataset2_validation=load_dataset("boris/gis_filtered", use_auth_token=os.environ["HF_TOKEN"], split="validation",streaming=True) ``` ### Owner No
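To rule out a token-plumbing problem on the client side, a sketch that registers the token with `huggingface_hub` first and then lets `datasets` reuse it (the `HF_TOKEN` env var name is taken from the report; assumes a recent `huggingface_hub` with a `login` helper):

```python
import os
from huggingface_hub import login
from datasets import load_dataset

# Register the token once so both the dataset script and the data file
# downloads are authenticated, then reuse it via use_auth_token=True.
login(token=os.environ["HF_TOKEN"])

dataset2_train = load_dataset("boris/gis_filtered", split="train", streaming=True, use_auth_token=True)
```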
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4605/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4605/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4603
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4603/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4603/comments
https://api.github.com/repos/huggingface/datasets/issues/4603/events
https://github.com/huggingface/datasets/issues/4603
1,289,963,331
I_kwDODunzps5M40dD
4,603
CI fails recurrently and randomly on Windows
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
0
"2022-06-30T10:59:58"
"2022-06-30T13:22:25"
"2022-06-30T13:22:25"
MEMBER
null
As reported by @lhoestq, the Windows CI is currently flaky: some dependencies like `aiobotocore`, `multiprocess` and `seqeval` sometimes fail to install. In particular it seems that building the wheels fails. Here is an example of logs: ``` Building wheel for seqeval (setup.py): started Running command 'C:\tools\miniconda3\envs\py37\python.exe' -u -c 'import io, os, sys, setuptools, tokenize; sys.argv[0] = '"'"'C:\\Users\\circleci\\AppData\\Local\\Temp\\pip-install-h55pfgbv\\seqeval_d6cdb9d23ff6490b98b6c4bcaecb516e\\setup.py'"'"'; __file__='"'"'C:\\Users\\circleci\\AppData\\Local\\Temp\\pip-install-h55pfgbv\\seqeval_d6cdb9d23ff6490b98b6c4bcaecb516e\\setup.py'"'"';f = getattr(tokenize, '"'"'open'"'"', open)(__file__) if os.path.exists(__file__) else io.StringIO('"'"'from setuptools import setup; setup()'"'"');code = f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' bdist_wheel -d 'C:\Users\circleci\AppData\Local\Temp\pip-wheel-x3cc8ym6' No parent package detected, impossible to derive `name` running bdist_wheel running build running build_py package init file 'seqeval\__init__.py' not found (or not a regular file) package init file 'seqeval\metrics\__init__.py' not found (or not a regular file) C:\tools\miniconda3\envs\py37\lib\site-packages\setuptools\command\install.py:37: SetuptoolsDeprecationWarning: setup.py install is deprecated. Use build and pip and other standards-based tools. setuptools.SetuptoolsDeprecationWarning, installing to build\bdist.win-amd64\wheel running install running install_lib warning: install_lib: 'build\lib' does not exist -- no Python modules to install running install_egg_info running egg_info creating UNKNOWN.egg-info writing UNKNOWN.egg-info\PKG-INFO writing dependency_links to UNKNOWN.egg-info\dependency_links.txt writing top-level names to UNKNOWN.egg-info\top_level.txt writing manifest file 'UNKNOWN.egg-info\SOURCES.txt' reading manifest file 'UNKNOWN.egg-info\SOURCES.txt' writing manifest file 'UNKNOWN.egg-info\SOURCES.txt' Copying UNKNOWN.egg-info to build\bdist.win-amd64\wheel\.\UNKNOWN-0.0.0-py3.7.egg-info running install_scripts creating build\bdist.win-amd64\wheel\UNKNOWN-0.0.0.dist-info\WHEEL creating 'C:\Users\circleci\AppData\Local\Temp\pip-wheel-x3cc8ym6\UNKNOWN-0.0.0-py3-none-any.whl' and adding 'build\bdist.win-amd64\wheel' to it adding 'UNKNOWN-0.0.0.dist-info/METADATA' adding 'UNKNOWN-0.0.0.dist-info/WHEEL' adding 'UNKNOWN-0.0.0.dist-info/top_level.txt' adding 'UNKNOWN-0.0.0.dist-info/RECORD' removing build\bdist.win-amd64\wheel Building wheel for seqeval (setup.py): finished with status 'done' Created wheel for seqeval: filename=UNKNOWN-0.0.0-py3-none-any.whl size=963 sha256=67eb93a6e1ff4796c5882a13f9fa25bb0d3d103796e2525f9cecf3b2ef26d4b1 Stored in directory: c:\users\circleci\appdata\local\pip\cache\wheels\05\96\ee\7cac4e74f3b19e3158dce26a20a1c86b3533c43ec72a549fd7 WARNING: Built wheel for seqeval is invalid: Wheel has unexpected file name: expected 'seqeval', got 'UNKNOWN' ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4603/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4603/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4597
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4597/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4597/comments
https://api.github.com/repos/huggingface/datasets/issues/4597/events
https://github.com/huggingface/datasets/issues/4597
1,288,672,007
I_kwDODunzps5Mz5MH
4,597
Streaming issue for financial_phrasebank
{ "login": "lewtun", "id": 26859204, "node_id": "MDQ6VXNlcjI2ODU5MjA0", "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lewtun", "html_url": "https://github.com/lewtun", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "organizations_url": "https://api.github.com/users/lewtun/orgs", "repos_url": "https://api.github.com/users/lewtun/repos", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "received_events_url": "https://api.github.com/users/lewtun/received_events", "type": "User", "site_admin": false }
[ { "id": 4069435429, "node_id": "LA_kwDODunzps7yjqgl", "url": "https://api.github.com/repos/huggingface/datasets/labels/hosted-on-google-drive", "name": "hosted-on-google-drive", "color": "8B51EF", "default": false, "description": "" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
3
"2022-06-29T12:45:43"
"2022-07-01T09:29:36"
"2022-07-01T09:29:36"
MEMBER
null
### Link

https://huggingface.co/datasets/financial_phrasebank/viewer/sentences_allagree/train

### Description

As reported by a community member using [AutoTrain Evaluate](https://huggingface.co/spaces/autoevaluate/model-evaluator/discussions/5#62bc217436d0e5d316a768f0), there seems to be a problem streaming this dataset:

```
Server error
Status code: 400
Exception: Exception
Message: Give up after 5 attempts with ConnectionError
```

### Owner

No
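For reference, a minimal sketch that should reproduce the failure outside the viewer (the config name is taken from the URL above; whether the error surfaces on the first example is an assumption):

```python
# Streaming reproduction sketch: the dataset's archive is hosted on Google
# Drive, so the connection error seen by the viewer should also appear here.
from datasets import load_dataset

ds = load_dataset("financial_phrasebank", "sentences_allagree", streaming=True)
print(next(iter(ds["train"])))  # expected to raise if the host refuses the request
```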
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4597/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4597/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4596
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4596/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4596/comments
https://api.github.com/repos/huggingface/datasets/issues/4596/events
https://github.com/huggingface/datasets/issues/4596
1,288,381,735
I_kwDODunzps5MyyUn
4,596
Dataset Viewer issue for universal_dependencies
{ "login": "Jordy-VL", "id": 16034009, "node_id": "MDQ6VXNlcjE2MDM0MDA5", "avatar_url": "https://avatars.githubusercontent.com/u/16034009?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Jordy-VL", "html_url": "https://github.com/Jordy-VL", "followers_url": "https://api.github.com/users/Jordy-VL/followers", "following_url": "https://api.github.com/users/Jordy-VL/following{/other_user}", "gists_url": "https://api.github.com/users/Jordy-VL/gists{/gist_id}", "starred_url": "https://api.github.com/users/Jordy-VL/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Jordy-VL/subscriptions", "organizations_url": "https://api.github.com/users/Jordy-VL/orgs", "repos_url": "https://api.github.com/users/Jordy-VL/repos", "events_url": "https://api.github.com/users/Jordy-VL/events{/privacy}", "received_events_url": "https://api.github.com/users/Jordy-VL/received_events", "type": "User", "site_admin": false }
[ { "id": 3470211881, "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer", "name": "dataset-viewer", "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co" } ]
closed
false
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[ { "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false } ]
null
2
"2022-06-29T08:50:29"
"2022-09-07T11:29:28"
"2022-09-07T11:29:27"
NONE
null
### Link

https://huggingface.co/datasets/universal_dependencies

### Description

```
invalid json response body at https://datasets-server.huggingface.co/splits?dataset=universal_dependencies
reason: Unexpected token I in JSON at position 0
```

### Owner

_No response_
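"Unexpected token I" usually means the response body starts with plain text rather than JSON (for example an "Internal Server Error" page). A quick check of the raw response, assuming nothing beyond the endpoint being reachable:

```python
# Inspect the raw /splits response instead of letting the frontend parse it.
import requests

r = requests.get(
    "https://datasets-server.huggingface.co/splits",
    params={"dataset": "universal_dependencies"},
)
print(r.status_code, r.headers.get("content-type"))
print(r.text[:200])  # a non-JSON prefix here would explain the parse error
```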
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4596/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4596/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4595
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4595/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4595/comments
https://api.github.com/repos/huggingface/datasets/issues/4595/events
https://github.com/huggingface/datasets/issues/4595
1,288,275,976
I_kwDODunzps5MyYgI
4,595
Dataset Viewer issue with false-positive PII redaction
{ "login": "cakiki", "id": 3664563, "node_id": "MDQ6VXNlcjM2NjQ1NjM=", "avatar_url": "https://avatars.githubusercontent.com/u/3664563?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cakiki", "html_url": "https://github.com/cakiki", "followers_url": "https://api.github.com/users/cakiki/followers", "following_url": "https://api.github.com/users/cakiki/following{/other_user}", "gists_url": "https://api.github.com/users/cakiki/gists{/gist_id}", "starred_url": "https://api.github.com/users/cakiki/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cakiki/subscriptions", "organizations_url": "https://api.github.com/users/cakiki/orgs", "repos_url": "https://api.github.com/users/cakiki/repos", "events_url": "https://api.github.com/users/cakiki/events{/privacy}", "received_events_url": "https://api.github.com/users/cakiki/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
2
"2022-06-29T07:15:57"
"2022-06-29T08:29:41"
"2022-06-29T08:27:49"
CONTRIBUTOR
null
### Link

https://huggingface.co/datasets/cakiki/rosetta-code

### Description

Hello, I just noticed an entry being redacted that shouldn't have been: `RootMeanSquare@Range[10]` is being displayed as `[email protected][10]`

### Owner

_No response_
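The viewer's actual redaction rule isn't visible from the outside, but an over-eager email pattern reproduces the symptom exactly; the regex below is an illustrative assumption, not the real implementation:

```python
import re

# An over-eager "email" pattern (assumed for illustration): it also matches
# Wolfram-language prefix application such as f@x, because nothing requires
# a dot-separated domain after the @.
EMAIL_LIKE = re.compile(r"\b[\w.+-]+@[\w-]+")

code = "RootMeanSquare@Range[10]"
print(EMAIL_LIKE.sub("[email protected]", code))  # -> "[email protected][10]"
```

A fix would be to require a plausible dot-separated domain after the `@`, or to skip redaction in code columns altogether.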
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4595/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4595/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4594
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4594/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4594/comments
https://api.github.com/repos/huggingface/datasets/issues/4594/events
https://github.com/huggingface/datasets/issues/4594
1,288,070,023
I_kwDODunzps5MxmOH
4,594
load_from_disk suggests incorrect fix when used to load DatasetDict
{ "login": "dvsth", "id": 11157811, "node_id": "MDQ6VXNlcjExMTU3ODEx", "avatar_url": "https://avatars.githubusercontent.com/u/11157811?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dvsth", "html_url": "https://github.com/dvsth", "followers_url": "https://api.github.com/users/dvsth/followers", "following_url": "https://api.github.com/users/dvsth/following{/other_user}", "gists_url": "https://api.github.com/users/dvsth/gists{/gist_id}", "starred_url": "https://api.github.com/users/dvsth/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dvsth/subscriptions", "organizations_url": "https://api.github.com/users/dvsth/orgs", "repos_url": "https://api.github.com/users/dvsth/repos", "events_url": "https://api.github.com/users/dvsth/events{/privacy}", "received_events_url": "https://api.github.com/users/dvsth/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
0
"2022-06-29T01:40:01"
"2022-06-29T04:03:44"
"2022-06-29T04:03:44"
NONE
null
Edit: Please feel free to remove this issue. The problem was not the error message, but the fact that `DatasetDict.load_from_disk` does not support loading nested splits, i.e. when one of the splits is itself a `DatasetDict`. If nesting splits is an antipattern, perhaps `load_from_disk` could throw a warning indicating that?
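A minimal sketch of the nesting pattern in question (whether `save_to_disk` succeeds before the load fails may depend on the version in use; treat this as illustrative):

```python
# Nested "split": a DatasetDict whose value is itself a DatasetDict.
from datasets import Dataset, DatasetDict

inner = DatasetDict({"a": Dataset.from_dict({"x": [1]}), "b": Dataset.from_dict({"x": [2]})})
outer = DatasetDict({"train": inner})

try:
    outer.save_to_disk("nested_dd")
    DatasetDict.load_from_disk("nested_dd")  # this round trip is what fails
except Exception as err:
    print(type(err).__name__, err)
```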
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4594/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4594/timeline
null
not_planned
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4592
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4592/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4592/comments
https://api.github.com/repos/huggingface/datasets/issues/4592/events
https://github.com/huggingface/datasets/issues/4592
1,288,029,377
I_kwDODunzps5MxcTB
4,592
Issue with jalFaizy/detect_chess_pieces when running datasets-cli test
{ "login": "faizankshaikh", "id": 8406903, "node_id": "MDQ6VXNlcjg0MDY5MDM=", "avatar_url": "https://avatars.githubusercontent.com/u/8406903?v=4", "gravatar_id": "", "url": "https://api.github.com/users/faizankshaikh", "html_url": "https://github.com/faizankshaikh", "followers_url": "https://api.github.com/users/faizankshaikh/followers", "following_url": "https://api.github.com/users/faizankshaikh/following{/other_user}", "gists_url": "https://api.github.com/users/faizankshaikh/gists{/gist_id}", "starred_url": "https://api.github.com/users/faizankshaikh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/faizankshaikh/subscriptions", "organizations_url": "https://api.github.com/users/faizankshaikh/orgs", "repos_url": "https://api.github.com/users/faizankshaikh/repos", "events_url": "https://api.github.com/users/faizankshaikh/events{/privacy}", "received_events_url": "https://api.github.com/users/faizankshaikh/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
3
"2022-06-29T00:15:54"
"2022-06-29T10:30:03"
"2022-06-29T07:49:27"
NONE
null
### Link

https://huggingface.co/datasets/jalFaizy/detect_chess_pieces

### Description

I am trying to write an appropriate data loader for [a custom dataset](https://huggingface.co/datasets/jalFaizy/detect_chess_pieces) using [this script](https://huggingface.co/datasets/jalFaizy/detect_chess_pieces/blob/main/detect_chess_pieces.py).

When I run the command

```
$ datasets-cli test "D:\workspace\HF\detect_chess_pieces" --save_infos --all_configs
```

it gives the following error:

```
Using custom data configuration default
Traceback (most recent call last):
  File "c:\users\faiza\anaconda3\lib\runpy.py", line 194, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "c:\users\faiza\anaconda3\lib\runpy.py", line 87, in _run_code
    exec(code, run_globals)
  File "C:\Users\faiza\anaconda3\Scripts\datasets-cli.exe\__main__.py", line 7, in <module>
  File "c:\users\faiza\anaconda3\lib\site-packages\datasets\commands\datasets_cli.py", line 39, in main
    service.run()
  File "c:\users\faiza\anaconda3\lib\site-packages\datasets\commands\test.py", line 132, in run
    for j, builder in enumerate(get_builders()):
  File "c:\users\faiza\anaconda3\lib\site-packages\datasets\commands\test.py", line 125, in get_builders
    yield builder_cls(
  File "c:\users\faiza\anaconda3\lib\site-packages\datasets\builder.py", line 1148, in __init__
    super().__init__(*args, **kwargs)
  File "c:\users\faiza\anaconda3\lib\site-packages\datasets\builder.py", line 306, in __init__
    info = self.get_exported_dataset_info()
  File "c:\users\faiza\anaconda3\lib\site-packages\datasets\builder.py", line 405, in get_exported_dataset_info
    return self.get_all_exported_dataset_infos().get(self.config.name, DatasetInfo())
  File "c:\users\faiza\anaconda3\lib\site-packages\datasets\builder.py", line 390, in get_all_exported_dataset_infos
    return DatasetInfosDict.from_directory(cls.get_imported_module_dir())
  File "c:\users\faiza\anaconda3\lib\site-packages\datasets\info.py", line 309, in from_directory
    dataset_infos_dict = {
  File "c:\users\faiza\anaconda3\lib\site-packages\datasets\info.py", line 310, in <dictcomp>
    config_name: DatasetInfo.from_dict(dataset_info_dict)
  File "c:\users\faiza\anaconda3\lib\site-packages\datasets\info.py", line 272, in from_dict
    return cls(**{k: v for k, v in dataset_info_dict.items() if k in field_names})
  File "<string>", line 20, in __init__
  File "c:\users\faiza\anaconda3\lib\site-packages\datasets\info.py", line 160, in __post_init__
    templates = [
  File "c:\users\faiza\anaconda3\lib\site-packages\datasets\info.py", line 161, in <listcomp>
    template if isinstance(template, TaskTemplate) else task_template_from_dict(template)
  File "c:\users\faiza\anaconda3\lib\site-packages\datasets\tasks\__init__.py", line 43, in task_template_from_dict
    return template.from_dict(task_template_dict)
AttributeError: 'NoneType' object has no attribute 'from_dict'
```

My assumption is that there is some kind of issue in how the `task_templates` are read, because the same error occurs even if I set them to `None` or leave the argument out entirely.

### Owner

Yes
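Since the traceback dies while deserializing `task_templates`, a quick diagnostic is to look at what the exported infos file actually contains (the file name and layout are inferred from the traceback, not verified against this repo):

```python
# Print the "task_templates" entries datasets-cli tries to deserialize.
# An entry whose "task" name is missing or unregistered makes the template
# lookup return None, which produces exactly this AttributeError.
import json

with open("dataset_infos.json") as f:  # assumed to sit next to the loading script
    infos = json.load(f)

for config_name, info in infos.items():
    print(config_name, "->", info.get("task_templates"))
```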
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4592/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4592/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4591
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4591/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4591/comments
https://api.github.com/repos/huggingface/datasets/issues/4591/events
https://github.com/huggingface/datasets/issues/4591
1,288,021,332
I_kwDODunzps5MxaVU
4,591
Can't push Images to hub with manual Dataset
{ "login": "cceyda", "id": 15624271, "node_id": "MDQ6VXNlcjE1NjI0Mjcx", "avatar_url": "https://avatars.githubusercontent.com/u/15624271?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cceyda", "html_url": "https://github.com/cceyda", "followers_url": "https://api.github.com/users/cceyda/followers", "following_url": "https://api.github.com/users/cceyda/following{/other_user}", "gists_url": "https://api.github.com/users/cceyda/gists{/gist_id}", "starred_url": "https://api.github.com/users/cceyda/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cceyda/subscriptions", "organizations_url": "https://api.github.com/users/cceyda/orgs", "repos_url": "https://api.github.com/users/cceyda/repos", "events_url": "https://api.github.com/users/cceyda/events{/privacy}", "received_events_url": "https://api.github.com/users/cceyda/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[ { "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false } ]
null
1
"2022-06-29T00:01:23"
"2022-07-08T12:01:36"
"2022-07-08T12:01:35"
CONTRIBUTOR
null
## Describe the bug

If I create a dataset that includes an `Image` feature manually, the decoded images are not pushed when pushing to the Hub; instead, loading the dataset later looks for each image at the local path where it is (or used to be). This doesn't happen (at least it didn't use to) with `imagefolder`. I want to build the dataset manually because it is complicated.

This happens even though the dataset looks like it contains decoded images:

![image](https://user-images.githubusercontent.com/15624271/176322689-2cc819cf-9d5c-4a8f-9f3d-83ae8ec06f20.png)

and I use `embed_external_files=True` in `push_to_hub` (the same happens with `False`).

## Steps to reproduce the bug

```python
from PIL import Image
from datasets import Image as ImageFeature
from datasets import Features, Dataset

# manually create the dataset
feats = Features(
    {
        "images": [ImageFeature()],  # same even with an explicit ImageFeature(decode=True)
        "input_image": ImageFeature(),
    }
)
test_data = {
    "images": [[Image.open("test.jpg"), Image.open("test.jpg"), Image.open("test.jpg")]],
    "input_image": [Image.open("test.jpg")],
}
test_dataset = Dataset.from_dict(test_data, features=feats)
print(test_dataset)
test_dataset.push_to_hub("ceyda/image_test_public", private=False, token="", embed_external_files=True)

# clear the cache:  rm -r ~/.cache/huggingface
# remove "test.jpg" to see that loading looks for the image on the local path

test_dataset = load_dataset("ceyda/image_test_public", use_auth_token="")
print(test_dataset)
print(test_dataset["train"][0])
```

## Expected results

It should be possible to push image bytes if the dataset has `Image(decode=True)`.

## Actual results

It errors because it tries to decode the file from a non-existent local path:

```
----> print(test_dataset['train'][0])

File ~/.local/lib/python3.8/site-packages/datasets/arrow_dataset.py:2154, in Dataset.__getitem__(self, key)
   2152 def __getitem__(self, key):  # noqa: F811
   2153     """Can be used to index columns (by string names) or rows (by integer index or iterable of indices or bools)."""
-> 2154     return self._getitem(
   2155         key,
   2156     )

File ~/.local/lib/python3.8/site-packages/datasets/arrow_dataset.py:2139, in Dataset._getitem(self, key, decoded, **kwargs)
   2137 formatter = get_formatter(format_type, features=self.features, decoded=decoded, **format_kwargs)
   2138 pa_subtable = query_table(self._data, key, indices=self._indices if self._indices is not None else None)
-> 2139 formatted_output = format_table(
   2140     pa_subtable, key, formatter=formatter, format_columns=format_columns, output_all_columns=output_all_columns
   2141 )
   2142 return formatted_output

File ~/.local/lib/python3.8/site-packages/datasets/formatting/formatting.py:532, in format_table(table, key, formatter, format_columns, output_all_columns)
    530 python_formatter = PythonFormatter(features=None)
    531 if format_columns is None:
...
-> 3068 fp = builtins.open(filename, "rb")
   3069 exclusive_fp = True
   3071 try:

FileNotFoundError: [Errno 2] No such file or directory: 'test.jpg'
```

## Environment info

- `datasets` version: 2.3.2
- Platform: Linux-5.4.0-1074-azure-x86_64-with-glibc2.29
- Python version: 3.8.10
- PyArrow version: 8.0.0
- Pandas version: 1.4.2
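Until embedding works for nested image columns, one possible workaround is to encode the images to bytes yourself before pushing, so no example carries a local path. This is a sketch under the assumption that a lossy JPEG re-encode is acceptable; `embed` is a hypothetical helper, not a `datasets` API:

```python
import io

from datasets import Dataset, Features
from datasets import Image as ImageFeature
from PIL import Image


def embed(img: Image.Image) -> dict:
    # Hypothetical helper: store the image in the Image feature's encoded
    # form ({"bytes": ..., "path": ...}) with no filesystem path attached.
    buf = io.BytesIO()
    img.save(buf, format="JPEG")  # lossy re-encode (assumption)
    return {"bytes": buf.getvalue(), "path": None}


feats = Features({"images": [ImageFeature()], "input_image": ImageFeature()})
test_data = {
    "images": [[embed(Image.open("test.jpg"))] * 3],
    "input_image": [embed(Image.open("test.jpg"))],
}
test_dataset = Dataset.from_dict(test_data, features=feats)
# push_to_hub should now have no local paths left to resolve
```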
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4591/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4591/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4589
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4589/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4589/comments
https://api.github.com/repos/huggingface/datasets/issues/4589/events
https://github.com/huggingface/datasets/issues/4589
1,287,600,029
I_kwDODunzps5Mvzed
4,589
Permission denied: '/home/.cache' when using load_dataset with a local script
{ "login": "jiangh0", "id": 24559732, "node_id": "MDQ6VXNlcjI0NTU5NzMy", "avatar_url": "https://avatars.githubusercontent.com/u/24559732?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jiangh0", "html_url": "https://github.com/jiangh0", "followers_url": "https://api.github.com/users/jiangh0/followers", "following_url": "https://api.github.com/users/jiangh0/following{/other_user}", "gists_url": "https://api.github.com/users/jiangh0/gists{/gist_id}", "starred_url": "https://api.github.com/users/jiangh0/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jiangh0/subscriptions", "organizations_url": "https://api.github.com/users/jiangh0/orgs", "repos_url": "https://api.github.com/users/jiangh0/repos", "events_url": "https://api.github.com/users/jiangh0/events{/privacy}", "received_events_url": "https://api.github.com/users/jiangh0/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
0
"2022-06-28T16:26:03"
"2022-06-29T06:26:28"
"2022-06-29T06:25:08"
NONE
null
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/4589/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/4589/timeline
null
completed
null
null
false