Dataset schema (column name, dtype, and value statistics):

| Column | dtype | Values |
|---|---|---|
| comments_url | string | lengths 70–70 |
| timeline_url | string | lengths 70–70 |
| closed_at | string | lengths 20–20 |
| performed_via_github_app | null | |
| state_reason | string | 3 classes |
| node_id | string | lengths 18–32 |
| state | string | 2 classes |
| assignees | list | lengths 0–4 |
| draft | bool | 2 classes |
| number | int64 | 1.61k–6.73k |
| user | dict | |
| title | string | lengths 1–290 |
| events_url | string | lengths 68–68 |
| milestone | dict | |
| labels_url | string | lengths 75–75 |
| created_at | string | lengths 20–20 |
| active_lock_reason | null | |
| locked | bool | 1 class |
| assignee | dict | |
| pull_request | dict | |
| id | int64 | 771M–2.18B |
| labels | list | lengths 0–4 |
| url | string | lengths 61–61 |
| comments | sequence | lengths 0–30 |
| repository_url | string | 1 class |
| author_association | string | 3 classes |
| body | string | lengths 0–228k |
| updated_at | string | lengths 20–20 |
| html_url | string | lengths 49–51 |
| reactions | dict | |
| is_pull_request | bool | 2 classes |
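The records below each fill one row of this schema. As a quick orientation, a dataset with these columns can be loaded and inspected with the `datasets` library; this is a minimal sketch, and the repository id `user/github-issues` is a hypothetical placeholder rather than this dataset's real identifier:

```python
from datasets import load_dataset

# Hypothetical repository id; substitute the actual dataset path.
ds = load_dataset("user/github-issues", split="train")

# The feature types correspond to the dtype column of the schema above.
print(ds.features["number"])   # Value(dtype='int64')
print(ds.features["title"])    # Value(dtype='string')

# One record corresponds to one of the row blocks below.
row = ds[0]
print(row["title"], row["html_url"])
```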

**#2015: Fix ipython function creation in tests**

comments_url: https://api.github.com/repos/huggingface/datasets/issues/2015/comments
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/2015/timeline
closed_at: 2021-03-09T14:06:03Z
performed_via_github_app: null
state_reason: null
node_id: MDExOlB1bGxSZXF1ZXN0NTg3OTg4NTQ0
state: closed
assignees: []
draft: false
number: 2,015
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
title: Fix ipython function creation in tests
events_url: https://api.github.com/repos/huggingface/datasets/issues/2015/events
milestone: null
labels_url: https://api.github.com/repos/huggingface/datasets/issues/2015/labels{/name}
created_at: 2021-03-09T13:36:59Z
active_lock_reason: null
locked: false
assignee: null
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/2015.diff", "html_url": "https://github.com/huggingface/datasets/pull/2015", "merged_at": "2021-03-09T14:06:03Z", "patch_url": "https://github.com/huggingface/datasets/pull/2015.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2015" }
id: 825,942,108
labels: []
url: https://api.github.com/repos/huggingface/datasets/issues/2015
comments: [ "", "" ]
repository_url: https://api.github.com/repos/huggingface/datasets
author_association: MEMBER
body: The test at `tests/test_caching.py::RecurseDumpTest::test_dump_ipython_function` was failing in Python 3.8 because the IPython function was not properly created. Fix #2010
updated_at: 2021-03-09T14:06:04Z
html_url: https://github.com/huggingface/datasets/pull/2015
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2015/reactions" }
is_pull_request: true
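
For context on what "creating an ipython function" in a test involves: functions defined in an IPython session report a pseudo-filename like `<ipython-input-...>` instead of a `.py` file, and a test can emulate that by compiling source under such a filename. This is an illustrative sketch of the mechanism, not the actual test from this PR:

```python
# Emulate a function defined interactively in IPython: its code object carries
# an "<ipython-input-...>" pseudo-filename rather than a real .py path.
source = "def foo(x):\n    return x * 2\n"
code = compile(source, "<ipython-input-1-abcdef123456>", "exec")
namespace = {}
exec(code, namespace)
ipython_func = namespace["foo"]

print(ipython_func.__code__.co_filename)  # <ipython-input-1-abcdef123456>
print(ipython_func(3))                    # 6
```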

**#2014: more explicit method parameters**

comments_url: https://api.github.com/repos/huggingface/datasets/issues/2014/comments
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/2014/timeline
closed_at: 2021-03-10T10:08:36Z
performed_via_github_app: null
state_reason: null
node_id: MDExOlB1bGxSZXF1ZXN0NTg3OTY1NDg3
state: closed
assignees: []
draft: false
number: 2,014
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/17948980?v=4", "events_url": "https://api.github.com/users/theo-m/events{/privacy}", "followers_url": "https://api.github.com/users/theo-m/followers", "following_url": "https://api.github.com/users/theo-m/following{/other_user}", "gists_url": "https://api.github.com/users/theo-m/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/theo-m", "id": 17948980, "login": "theo-m", "node_id": "MDQ6VXNlcjE3OTQ4OTgw", "organizations_url": "https://api.github.com/users/theo-m/orgs", "received_events_url": "https://api.github.com/users/theo-m/received_events", "repos_url": "https://api.github.com/users/theo-m/repos", "site_admin": false, "starred_url": "https://api.github.com/users/theo-m/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/theo-m/subscriptions", "type": "User", "url": "https://api.github.com/users/theo-m" }
title: more explicit method parameters
events_url: https://api.github.com/repos/huggingface/datasets/issues/2014/events
milestone: null
labels_url: https://api.github.com/repos/huggingface/datasets/issues/2014/labels{/name}
created_at: 2021-03-09T13:18:29Z
active_lock_reason: null
locked: false
assignee: null
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/2014.diff", "html_url": "https://github.com/huggingface/datasets/pull/2014", "merged_at": "2021-03-10T10:08:36Z", "patch_url": "https://github.com/huggingface/datasets/pull/2014.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2014" }
id: 825,916,531
labels: []
url: https://api.github.com/repos/huggingface/datasets/issues/2014
comments: [ "", "" ]
repository_url: https://api.github.com/repos/huggingface/datasets
author_association: CONTRIBUTOR
body: Re: #2009. Not super convinced this is better, and while I usually fight against kwargs, here it seems to me that this better conveys the relationship to the `_split_generators` method.
updated_at: 2021-03-10T10:08:37Z
html_url: https://github.com/huggingface/datasets/pull/2014
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2014/reactions" }
is_pull_request: true
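
The trade-off discussed in this PR, in general terms: the `gen_kwargs` declared in `_split_generators` are forwarded as keyword arguments to `_generate_examples`, so its signature can either spell them out or swallow them with `**kwargs`. A hedged sketch of the two styles, reduced to plain functions (the names are illustrative, not the PR's actual diff):

```python
def make_split_kwargs():
    # What _split_generators would put in gen_kwargs for one split.
    return {"filepath": "train.tsv", "split": "train"}

# Explicit parameters: the link to gen_kwargs is visible and typo-proof.
def generate_examples_explicit(filepath, split):
    return f"reading {split} examples from {filepath}"

# **kwargs: shorter, but hides which keys _split_generators actually provides.
def generate_examples_kwargs(**gen_kwargs):
    return f"reading {gen_kwargs['split']} examples from {gen_kwargs['filepath']}"

print(generate_examples_explicit(**make_split_kwargs()))
print(generate_examples_kwargs(**make_split_kwargs()))
```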

**#2013: Add Cryptonite dataset**

comments_url: https://api.github.com/repos/huggingface/datasets/issues/2013/comments
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/2013/timeline
closed_at: 2021-03-09T19:27:06Z
performed_via_github_app: null
state_reason: null
node_id: MDExOlB1bGxSZXF1ZXN0NTg3NzYzMTgx
state: closed
assignees: []
draft: false
number: 2,013
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/17948980?v=4", "events_url": "https://api.github.com/users/theo-m/events{/privacy}", "followers_url": "https://api.github.com/users/theo-m/followers", "following_url": "https://api.github.com/users/theo-m/following{/other_user}", "gists_url": "https://api.github.com/users/theo-m/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/theo-m", "id": 17948980, "login": "theo-m", "node_id": "MDQ6VXNlcjE3OTQ4OTgw", "organizations_url": "https://api.github.com/users/theo-m/orgs", "received_events_url": "https://api.github.com/users/theo-m/received_events", "repos_url": "https://api.github.com/users/theo-m/repos", "site_admin": false, "starred_url": "https://api.github.com/users/theo-m/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/theo-m/subscriptions", "type": "User", "url": "https://api.github.com/users/theo-m" }
title: Add Cryptonite dataset
events_url: https://api.github.com/repos/huggingface/datasets/issues/2013/events
milestone: null
labels_url: https://api.github.com/repos/huggingface/datasets/issues/2013/labels{/name}
created_at: 2021-03-09T10:32:11Z
active_lock_reason: null
locked: false
assignee: null
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/2013.diff", "html_url": "https://github.com/huggingface/datasets/pull/2013", "merged_at": "2021-03-09T19:27:06Z", "patch_url": "https://github.com/huggingface/datasets/pull/2013.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2013" }
id: 825,694,305
labels: []
url: https://api.github.com/repos/huggingface/datasets/issues/2013
comments: [ "", "" ]
repository_url: https://api.github.com/repos/huggingface/datasets
author_association: CONTRIBUTOR
body: cc @aviaefrat, who's the original author of the dataset & paper; see https://github.com/aviaefrat/cryptonite
updated_at: 2021-03-09T19:27:07Z
html_url: https://github.com/huggingface/datasets/pull/2013
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2013/reactions" }
is_pull_request: true

**#2012: No upstream branch**

comments_url: https://api.github.com/repos/huggingface/datasets/issues/2012/comments
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/2012/timeline
closed_at: 2021-03-09T11:33:31Z
performed_via_github_app: null
state_reason: completed
node_id: MDU6SXNzdWU4MjU2MzQwNjQ=
state: closed
assignees:
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" } ]
draft: null
number: 2,012
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/17948980?v=4", "events_url": "https://api.github.com/users/theo-m/events{/privacy}", "followers_url": "https://api.github.com/users/theo-m/followers", "following_url": "https://api.github.com/users/theo-m/following{/other_user}", "gists_url": "https://api.github.com/users/theo-m/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/theo-m", "id": 17948980, "login": "theo-m", "node_id": "MDQ6VXNlcjE3OTQ4OTgw", "organizations_url": "https://api.github.com/users/theo-m/orgs", "received_events_url": "https://api.github.com/users/theo-m/received_events", "repos_url": "https://api.github.com/users/theo-m/repos", "site_admin": false, "starred_url": "https://api.github.com/users/theo-m/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/theo-m/subscriptions", "type": "User", "url": "https://api.github.com/users/theo-m" }
title: No upstream branch
events_url: https://api.github.com/repos/huggingface/datasets/issues/2012/events
milestone: null
labels_url: https://api.github.com/repos/huggingface/datasets/issues/2012/labels{/name}
created_at: 2021-03-09T09:48:55Z
active_lock_reason: null
locked: false
assignee:
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
pull_request: null
id: 825,634,064
labels:
[ { "color": "0075ca", "default": true, "description": "Improvements or additions to documentation", "id": 1935892861, "name": "documentation", "node_id": "MDU6TGFiZWwxOTM1ODkyODYx", "url": "https://api.github.com/repos/huggingface/datasets/labels/documentation" } ]
url: https://api.github.com/repos/huggingface/datasets/issues/2012
comments: [ "", "" ]
repository_url: https://api.github.com/repos/huggingface/datasets
author_association: CONTRIBUTOR
body: The documentation on adding a new dataset seems outdated: https://github.com/huggingface/datasets/blob/987df6b4e9e20fc0c92bc9df48137d170756fd7b/ADD_NEW_DATASET.md#L49-L54. There is no `upstream` branch on the remote.
updated_at: 2021-03-09T11:33:31Z
html_url: https://github.com/huggingface/datasets/issues/2012
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2012/reactions" }
is_pull_request: false

**#2011: Add RoSent Dataset**

comments_url: https://api.github.com/repos/huggingface/datasets/issues/2011/comments
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/2011/timeline
closed_at: 2021-03-11T18:00:52Z
performed_via_github_app: null
state_reason: null
node_id: MDExOlB1bGxSZXF1ZXN0NTg3Njk4MTAx
state: closed
assignees: []
draft: false
number: 2,011
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/29076344?v=4", "events_url": "https://api.github.com/users/gchhablani/events{/privacy}", "followers_url": "https://api.github.com/users/gchhablani/followers", "following_url": "https://api.github.com/users/gchhablani/following{/other_user}", "gists_url": "https://api.github.com/users/gchhablani/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gchhablani", "id": 29076344, "login": "gchhablani", "node_id": "MDQ6VXNlcjI5MDc2MzQ0", "organizations_url": "https://api.github.com/users/gchhablani/orgs", "received_events_url": "https://api.github.com/users/gchhablani/received_events", "repos_url": "https://api.github.com/users/gchhablani/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gchhablani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gchhablani/subscriptions", "type": "User", "url": "https://api.github.com/users/gchhablani" }
title: Add RoSent Dataset
events_url: https://api.github.com/repos/huggingface/datasets/issues/2011/events
milestone: null
labels_url: https://api.github.com/repos/huggingface/datasets/issues/2011/labels{/name}
created_at: 2021-03-09T09:40:08Z
active_lock_reason: null
locked: false
assignee: null
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/2011.diff", "html_url": "https://github.com/huggingface/datasets/pull/2011", "merged_at": "2021-03-11T18:00:52Z", "patch_url": "https://github.com/huggingface/datasets/pull/2011.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2011" }
id: 825,621,952
labels: []
url: https://api.github.com/repos/huggingface/datasets/issues/2011
comments: [ "", "" ]
repository_url: https://api.github.com/repos/huggingface/datasets
author_association: CONTRIBUTOR
body: This PR adds a Romanian sentiment analysis dataset. It also closes pending PR #1529. I had to add an `original_id` feature because the dataset files have repeated IDs; I can remove it if needed. I have also added an `id` feature, which is unique. Let me know in case of any issues.
updated_at: 2021-03-11T18:00:52Z
html_url: https://github.com/huggingface/datasets/pull/2011
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2011/reactions" }
is_pull_request: true

**#2010: Local testing fails**

comments_url: https://api.github.com/repos/huggingface/datasets/issues/2010/comments
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/2010/timeline
closed_at: 2021-03-09T14:06:03Z
performed_via_github_app: null
state_reason: completed
node_id: MDU6SXNzdWU4MjU1Njc2MzU=
state: closed
assignees:
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" } ]
draft: null
number: 2,010
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/17948980?v=4", "events_url": "https://api.github.com/users/theo-m/events{/privacy}", "followers_url": "https://api.github.com/users/theo-m/followers", "following_url": "https://api.github.com/users/theo-m/following{/other_user}", "gists_url": "https://api.github.com/users/theo-m/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/theo-m", "id": 17948980, "login": "theo-m", "node_id": "MDQ6VXNlcjE3OTQ4OTgw", "organizations_url": "https://api.github.com/users/theo-m/orgs", "received_events_url": "https://api.github.com/users/theo-m/received_events", "repos_url": "https://api.github.com/users/theo-m/repos", "site_admin": false, "starred_url": "https://api.github.com/users/theo-m/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/theo-m/subscriptions", "type": "User", "url": "https://api.github.com/users/theo-m" }
title: Local testing fails
events_url: https://api.github.com/repos/huggingface/datasets/issues/2010/events
milestone: null
labels_url: https://api.github.com/repos/huggingface/datasets/issues/2010/labels{/name}
created_at: 2021-03-09T09:01:38Z
active_lock_reason: null
locked: false
assignee:
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
pull_request: null
id: 825,567,635
labels:
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
url: https://api.github.com/repos/huggingface/datasets/issues/2010
comments: [ "", "" ]
repository_url: https://api.github.com/repos/huggingface/datasets
author_association: CONTRIBUTOR
body:
I'm following the CI setup as described in https://github.com/huggingface/datasets/blob/8eee4fa9e133fe873a7993ba746d32ca2b687551/.circleci/config.yml#L16-L19 in a new conda environment, at commit https://github.com/huggingface/datasets/commit/4de6dbf84e93dad97e1000120d6628c88954e5d4, and getting:

```
FAILED tests/test_caching.py::RecurseDumpTest::test_dump_ipython_function - TypeError: an integer is required (got type bytes)
1 failed, 2321 passed, 5109 skipped, 10 warnings in 124.32s (0:02:04)
```

Seems like a discrepancy with CI, perhaps a library version that's not pinned? Tried with `pyarrow=={1.0.0,0.17.1,2.0.0}`.

updated_at: 2021-03-09T14:06:03Z
html_url: https://github.com/huggingface/datasets/issues/2010
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2010/reactions" }
is_pull_request: false

**#2009: Ambiguous documentation**

comments_url: https://api.github.com/repos/huggingface/datasets/issues/2009/comments
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/2009/timeline
closed_at: 2021-03-12T15:01:34Z
performed_via_github_app: null
state_reason: completed
node_id: MDU6SXNzdWU4MjU1NDEzNjY=
state: closed
assignees:
[ { "avatar_url": "https://avatars.githubusercontent.com/u/17948980?v=4", "events_url": "https://api.github.com/users/theo-m/events{/privacy}", "followers_url": "https://api.github.com/users/theo-m/followers", "following_url": "https://api.github.com/users/theo-m/following{/other_user}", "gists_url": "https://api.github.com/users/theo-m/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/theo-m", "id": 17948980, "login": "theo-m", "node_id": "MDQ6VXNlcjE3OTQ4OTgw", "organizations_url": "https://api.github.com/users/theo-m/orgs", "received_events_url": "https://api.github.com/users/theo-m/received_events", "repos_url": "https://api.github.com/users/theo-m/repos", "site_admin": false, "starred_url": "https://api.github.com/users/theo-m/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/theo-m/subscriptions", "type": "User", "url": "https://api.github.com/users/theo-m" } ]
draft: null
number: 2,009
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/17948980?v=4", "events_url": "https://api.github.com/users/theo-m/events{/privacy}", "followers_url": "https://api.github.com/users/theo-m/followers", "following_url": "https://api.github.com/users/theo-m/following{/other_user}", "gists_url": "https://api.github.com/users/theo-m/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/theo-m", "id": 17948980, "login": "theo-m", "node_id": "MDQ6VXNlcjE3OTQ4OTgw", "organizations_url": "https://api.github.com/users/theo-m/orgs", "received_events_url": "https://api.github.com/users/theo-m/received_events", "repos_url": "https://api.github.com/users/theo-m/repos", "site_admin": false, "starred_url": "https://api.github.com/users/theo-m/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/theo-m/subscriptions", "type": "User", "url": "https://api.github.com/users/theo-m" }
title: Ambiguous documentation
events_url: https://api.github.com/repos/huggingface/datasets/issues/2009/events
milestone: null
labels_url: https://api.github.com/repos/huggingface/datasets/issues/2009/labels{/name}
created_at: 2021-03-09T08:42:11Z
active_lock_reason: null
locked: false
assignee:
{ "avatar_url": "https://avatars.githubusercontent.com/u/17948980?v=4", "events_url": "https://api.github.com/users/theo-m/events{/privacy}", "followers_url": "https://api.github.com/users/theo-m/followers", "following_url": "https://api.github.com/users/theo-m/following{/other_user}", "gists_url": "https://api.github.com/users/theo-m/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/theo-m", "id": 17948980, "login": "theo-m", "node_id": "MDQ6VXNlcjE3OTQ4OTgw", "organizations_url": "https://api.github.com/users/theo-m/orgs", "received_events_url": "https://api.github.com/users/theo-m/received_events", "repos_url": "https://api.github.com/users/theo-m/repos", "site_admin": false, "starred_url": "https://api.github.com/users/theo-m/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/theo-m/subscriptions", "type": "User", "url": "https://api.github.com/users/theo-m" }
pull_request: null
id: 825,541,366
labels:
[ { "color": "0075ca", "default": true, "description": "Improvements or additions to documentation", "id": 1935892861, "name": "documentation", "node_id": "MDU6TGFiZWwxOTM1ODkyODYx", "url": "https://api.github.com/repos/huggingface/datasets/labels/documentation" } ]
url: https://api.github.com/repos/huggingface/datasets/issues/2009
comments: [ "", "" ]
repository_url: https://api.github.com/repos/huggingface/datasets
author_association: CONTRIBUTOR
body:
https://github.com/huggingface/datasets/blob/2ac9a0d24a091989f869af55f9f6411b37ff5188/templates/new_dataset_script.py#L156-L158

Looking at the template, I find this documentation line confusing: the method parameters don't include `gen_kwargs`, so I'm unclear where they come from. Happy to push a PR with a clearer statement once I understand the meaning.

updated_at: 2021-03-12T15:01:34Z
html_url: https://github.com/huggingface/datasets/issues/2009
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2009/reactions" }
is_pull_request: false
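
The likely source of the confusion in #2009: `gen_kwargs` never appear in the signature of `_split_generators`; each `SplitGenerator` stores them, and the builder later unpacks them as keyword arguments when it calls `_generate_examples`. A minimal sketch of that forwarding, reduced to plain Python (the real `datasets.GeneratorBasedBuilder` does the equivalent internally):

```python
class SplitGenerator:
    def __init__(self, name, gen_kwargs):
        self.name = name
        self.gen_kwargs = gen_kwargs  # stored here, consumed later

def split_generators():
    # Each split declares the kwargs its example generator will need.
    return [SplitGenerator(name="train", gen_kwargs={"filepath": "train.tsv"})]

def generate_examples(filepath):
    # Parameter names must match the keys declared in gen_kwargs above.
    yield 0, {"text": f"first example from {filepath}"}

for split in split_generators():
    # This unpacking step is where the gen_kwargs "come from".
    for key, example in generate_examples(**split.gen_kwargs):
        print(split.name, key, example)
```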

**#2008: Fix various typos/grammar in the docs**

comments_url: https://api.github.com/repos/huggingface/datasets/issues/2008/comments
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/2008/timeline
closed_at: 2021-03-09T10:21:32Z
performed_via_github_app: null
state_reason: null
node_id: MDExOlB1bGxSZXF1ZXN0NTg3Mjc1Njk4
state: closed
assignees: []
draft: false
number: 2,008
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
title: Fix various typos/grammar in the docs
events_url: https://api.github.com/repos/huggingface/datasets/issues/2008/events
milestone: null
labels_url: https://api.github.com/repos/huggingface/datasets/issues/2008/labels{/name}
created_at: 2021-03-09T01:39:28Z
active_lock_reason: null
locked: false
assignee: null
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/2008.diff", "html_url": "https://github.com/huggingface/datasets/pull/2008", "merged_at": "2021-03-09T10:21:32Z", "patch_url": "https://github.com/huggingface/datasets/pull/2008.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2008" }
id: 825,153,804
labels: []
url: https://api.github.com/repos/huggingface/datasets/issues/2008
comments: [ "", "" ]
repository_url: https://api.github.com/repos/huggingface/datasets
author_association: CONTRIBUTOR
body:
This PR:
* fixes various typos/grammar issues I came across while reading the docs
* adds the "Install with conda" installation instructions

Closes #1959

updated_at: 2021-03-15T18:42:49Z
html_url: https://github.com/huggingface/datasets/pull/2008
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2008/reactions" }
is_pull_request: true

**#2007: How to not load huggingface datasets into memory**

comments_url: https://api.github.com/repos/huggingface/datasets/issues/2007/comments
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/2007/timeline
closed_at: 2021-08-04T18:02:25Z
performed_via_github_app: null
state_reason: completed
node_id: MDU6SXNzdWU4MjQ1MTgxNTg=
state: closed
assignees: []
draft: null
number: 2,007
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/79165106?v=4", "events_url": "https://api.github.com/users/dorost1234/events{/privacy}", "followers_url": "https://api.github.com/users/dorost1234/followers", "following_url": "https://api.github.com/users/dorost1234/following{/other_user}", "gists_url": "https://api.github.com/users/dorost1234/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dorost1234", "id": 79165106, "login": "dorost1234", "node_id": "MDQ6VXNlcjc5MTY1MTA2", "organizations_url": "https://api.github.com/users/dorost1234/orgs", "received_events_url": "https://api.github.com/users/dorost1234/received_events", "repos_url": "https://api.github.com/users/dorost1234/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dorost1234/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dorost1234/subscriptions", "type": "User", "url": "https://api.github.com/users/dorost1234" }
title: How to not load huggingface datasets into memory
events_url: https://api.github.com/repos/huggingface/datasets/issues/2007/events
milestone: null
labels_url: https://api.github.com/repos/huggingface/datasets/issues/2007/labels{/name}
created_at: 2021-03-08T12:35:26Z
active_lock_reason: null
locked: false
assignee: null
pull_request: null
id: 824,518,158
labels: []
url: https://api.github.com/repos/huggingface/datasets/issues/2007
comments: [ "", "" ]
repository_url: https://api.github.com/repos/huggingface/datasets
author_association: NONE
body:
Hi, I am running this example from the transformers library, version 4.3.3 (full context in https://github.com/huggingface/transformers/issues/8771, but the command should work out of the box):

```
USE_TF=0 deepspeed run_seq2seq.py --model_name_or_path google/mt5-base --dataset_name wmt16 --dataset_config_name ro-en --source_prefix "translate English to Romanian: " --task translation_en_to_ro --output_dir /test/test_large --do_train --do_eval --predict_with_generate --max_train_samples 500 --max_val_samples 500 --max_source_length 128 --max_target_length 128 --sortish_sampler --per_device_train_batch_size 8 --val_max_target_length 128 --deepspeed ds_config.json --num_train_epochs 1 --eval_steps 25000 --warmup_steps 500 --overwrite_output_dir
```

(The script: https://github.com/huggingface/transformers/blob/master/examples/seq2seq/run_seq2seq.py)

If I do not pass `max_train_samples` in the above command and load the full dataset, I get a memory issue on a GPU with 24 GB of memory. I need to train a large-scale mt5 model on large-scale datasets such as Wikipedia (several of them concatenated) or other multilingual datasets like OPUS. Could you help me avoid loading the full data into memory, so that the script does not depend on the dataset size? In the above example, I was hoping the script could work without relying on dataset size, so I can still train the model without subsampling the training set. Thank you so much in advance for your great help, @lhoestq.

updated_at: 2021-08-04T18:02:25Z
html_url: https://github.com/huggingface/datasets/issues/2007
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2007/reactions" }
is_pull_request: false
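
Two standard ways to keep a dataset out of RAM, sketched under the assumption of a reasonably recent `datasets` version (this is a general sketch, not the specific resolution of this issue): loaded datasets are Arrow files memory-mapped from disk, so `keep_in_memory=False` keeps resident memory small, and streaming mode never materializes the dataset at all:

```python
from datasets import load_dataset

# Option 1: memory-mapped loading. The Arrow data stays on disk and rows are
# read on demand; keep_in_memory=False makes that explicit.
wmt = load_dataset("wmt16", "ro-en", split="train", keep_in_memory=False)
print(wmt[0])  # reads a single row from disk

# Option 2: streaming. Examples are yielded lazily, so memory use is
# independent of dataset size (requires datasets >= 1.9).
wmt_stream = load_dataset("wmt16", "ro-en", split="train", streaming=True)
for example in wmt_stream.take(3):
    print(example["translation"]["en"])
```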

**#2006: Don't gitignore dvc.lock**

comments_url: https://api.github.com/repos/huggingface/datasets/issues/2006/comments
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/2006/timeline
closed_at: 2021-03-08T11:28:34Z
performed_via_github_app: null
state_reason: null
node_id: MDExOlB1bGxSZXF1ZXN0NTg2Njg5Nzk2
state: closed
assignees: []
draft: false
number: 2,006
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
title: Don't gitignore dvc.lock
events_url: https://api.github.com/repos/huggingface/datasets/issues/2006/events
milestone: null
labels_url: https://api.github.com/repos/huggingface/datasets/issues/2006/labels{/name}
created_at: 2021-03-08T11:13:08Z
active_lock_reason: null
locked: false
assignee: null
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/2006.diff", "html_url": "https://github.com/huggingface/datasets/pull/2006", "merged_at": "2021-03-08T11:28:34Z", "patch_url": "https://github.com/huggingface/datasets/pull/2006.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2006" }
id: 824,457,794
labels: []
url: https://api.github.com/repos/huggingface/datasets/issues/2006
comments: [ "", "" ]
repository_url: https://api.github.com/repos/huggingface/datasets
author_association: MEMBER
body:
The benchmark runs are [failing](https://github.com/huggingface/datasets/runs/2055534629?check_suite_focus=true) because of:

```
ERROR: 'dvc.lock' is git-ignored.
```

I removed `dvc.lock` from the `.gitignore` to fix that.

updated_at: 2021-03-08T11:28:35Z
html_url: https://github.com/huggingface/datasets/pull/2006
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2006/reactions" }
is_pull_request: true

**#2005: Setting to torch format not working with torchvision and MNIST**

comments_url: https://api.github.com/repos/huggingface/datasets/issues/2005/comments
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/2005/timeline
closed_at: 2021-03-09T17:58:13Z
performed_via_github_app: null
state_reason: completed
node_id: MDU6SXNzdWU4MjQyNzUwMzU=
state: closed
assignees: []
draft: null
number: 2,005
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/29076344?v=4", "events_url": "https://api.github.com/users/gchhablani/events{/privacy}", "followers_url": "https://api.github.com/users/gchhablani/followers", "following_url": "https://api.github.com/users/gchhablani/following{/other_user}", "gists_url": "https://api.github.com/users/gchhablani/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gchhablani", "id": 29076344, "login": "gchhablani", "node_id": "MDQ6VXNlcjI5MDc2MzQ0", "organizations_url": "https://api.github.com/users/gchhablani/orgs", "received_events_url": "https://api.github.com/users/gchhablani/received_events", "repos_url": "https://api.github.com/users/gchhablani/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gchhablani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gchhablani/subscriptions", "type": "User", "url": "https://api.github.com/users/gchhablani" }
title: Setting to torch format not working with torchvision and MNIST
events_url: https://api.github.com/repos/huggingface/datasets/issues/2005/events
milestone: null
labels_url: https://api.github.com/repos/huggingface/datasets/issues/2005/labels{/name}
created_at: 2021-03-08T07:38:11Z
active_lock_reason: null
locked: false
assignee: null
pull_request: null
id: 824,275,035
labels: []
url: https://api.github.com/repos/huggingface/datasets/issues/2005
comments: [ "", "" ]
repository_url: https://api.github.com/repos/huggingface/datasets
author_association: CONTRIBUTOR
body:
Hi, I am trying to use `torchvision.transforms` to handle the transformation of the image data in the `mnist` dataset. Assume I have a `transform` variable which contains the `torchvision.transforms` object. A snippet of what I am trying to do:

```python
def prepare_features(examples):
    images = []
    labels = []
    for example_idx, example in enumerate(examples["image"]):
        if transform is not None:
            images.append(transform(
                np.array(examples["image"][example_idx], dtype=np.uint8)
            ))
        else:
            images.append(torch.tensor(np.array(examples["image"][example_idx], dtype=np.uint8)))
        labels.append(torch.tensor(examples["label"][example_idx]))
    output = {"label": labels, "image": images}
    return output

raw_dataset = load_dataset('mnist')
train_dataset = raw_dataset.map(prepare_features, batched=True, batch_size=10000)
train_dataset.set_format("torch", columns=["image", "label"])
```

After this, I check the type of the following:

```python
print(type(train_dataset["train"]["label"]))
print(type(train_dataset["train"]["image"][0]))
```

This leads to the following output:

```python
<class 'torch.Tensor'>
<class 'list'>
```

I use `torch.utils.data.DataLoader` for batches, and the type of `batch["train"]["image"]` is also `<class 'list'>`. I don't understand why only the `label` is converted to a torch tensor and the image is not. How can I fix this issue?

Thanks,
Gunjan

EDIT: I just checked the shapes and the types. `batch[image]` is actually a list of lists of tensors. The shape is (1,28,2,28), where `batch_size` is 2. I don't understand why this is happening; ideally it should be a tensor of shape (2,1,28,28).

EDIT 2: Inside `prepare_train_features`, the shape of `images[0]` is `torch.Size([1,28,28])`, so the conversion is working. However, the output of the `map` is a list of lists of lists of lists.

updated_at: 2021-03-09T17:58:13Z
html_url: https://github.com/huggingface/datasets/issues/2005
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2005/reactions" }
is_pull_request: false
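
One way around this class of problem, offered as a hedged alternative rather than the recorded resolution of the issue: apply the torchvision transform lazily with `Dataset.set_transform`, which formats rows on access and returns whatever the transform produces, so images come back as stacked tensors instead of nested lists:

```python
import numpy as np
import torch
from datasets import load_dataset
from torchvision import transforms

transform = transforms.Compose([transforms.ToTensor()])

def to_tensors(batch):
    # Called on the fly each time rows are accessed; returns real tensors.
    images = [transform(np.array(img, dtype=np.uint8)) for img in batch["image"]]
    return {
        "image": torch.stack(images),          # (batch, 1, 28, 28)
        "label": torch.tensor(batch["label"]),
    }

mnist = load_dataset("mnist", split="train")
mnist.set_transform(to_tensors)

loader = torch.utils.data.DataLoader(mnist, batch_size=2)
batch = next(iter(loader))
print(batch["image"].shape)  # torch.Size([2, 1, 28, 28])
```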

**#2004: LaRoSeDa**

comments_url: https://api.github.com/repos/huggingface/datasets/issues/2004/comments
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/2004/timeline
closed_at: 2021-03-17T10:43:20Z
performed_via_github_app: null
state_reason: null
node_id: MDExOlB1bGxSZXF1ZXN0NTg2MzcyODY1
state: closed
assignees: []
draft: false
number: 2,004
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/6823177?v=4", "events_url": "https://api.github.com/users/MihaelaGaman/events{/privacy}", "followers_url": "https://api.github.com/users/MihaelaGaman/followers", "following_url": "https://api.github.com/users/MihaelaGaman/following{/other_user}", "gists_url": "https://api.github.com/users/MihaelaGaman/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/MihaelaGaman", "id": 6823177, "login": "MihaelaGaman", "node_id": "MDQ6VXNlcjY4MjMxNzc=", "organizations_url": "https://api.github.com/users/MihaelaGaman/orgs", "received_events_url": "https://api.github.com/users/MihaelaGaman/received_events", "repos_url": "https://api.github.com/users/MihaelaGaman/repos", "site_admin": false, "starred_url": "https://api.github.com/users/MihaelaGaman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MihaelaGaman/subscriptions", "type": "User", "url": "https://api.github.com/users/MihaelaGaman" }
title: LaRoSeDa
events_url: https://api.github.com/repos/huggingface/datasets/issues/2004/events
milestone: null
labels_url: https://api.github.com/repos/huggingface/datasets/issues/2004/labels{/name}
created_at: 2021-03-08T01:06:32Z
active_lock_reason: null
locked: false
assignee: null
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/2004.diff", "html_url": "https://github.com/huggingface/datasets/pull/2004", "merged_at": "2021-03-17T10:43:20Z", "patch_url": "https://github.com/huggingface/datasets/pull/2004.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2004" }
id: 824,080,760
labels: []
url: https://api.github.com/repos/huggingface/datasets/issues/2004
comments: [ "", "" ]
repository_url: https://api.github.com/repos/huggingface/datasets
author_association: CONTRIBUTOR
body: Add LaRoSeDa to huggingface datasets.
updated_at: 2021-03-17T10:43:20Z
html_url: https://github.com/huggingface/datasets/pull/2004
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2004/reactions" }
is_pull_request: true

**#2003: Messages are being printed to the `stdout`**

comments_url: https://api.github.com/repos/huggingface/datasets/issues/2003/comments
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/2003/timeline
closed_at: 2023-07-25T16:35:21Z
performed_via_github_app: null
state_reason: completed
node_id: MDU6SXNzdWU4MjQwMzQ2Nzg=
state: closed
assignees: []
draft: null
number: 2,003
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/1367529?v=4", "events_url": "https://api.github.com/users/mahnerak/events{/privacy}", "followers_url": "https://api.github.com/users/mahnerak/followers", "following_url": "https://api.github.com/users/mahnerak/following{/other_user}", "gists_url": "https://api.github.com/users/mahnerak/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mahnerak", "id": 1367529, "login": "mahnerak", "node_id": "MDQ6VXNlcjEzNjc1Mjk=", "organizations_url": "https://api.github.com/users/mahnerak/orgs", "received_events_url": "https://api.github.com/users/mahnerak/received_events", "repos_url": "https://api.github.com/users/mahnerak/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mahnerak/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mahnerak/subscriptions", "type": "User", "url": "https://api.github.com/users/mahnerak" }
title: Messages are being printed to the `stdout`
events_url: https://api.github.com/repos/huggingface/datasets/issues/2003/events
milestone: null
labels_url: https://api.github.com/repos/huggingface/datasets/issues/2003/labels{/name}
created_at: 2021-03-07T22:09:34Z
active_lock_reason: null
locked: false
assignee: null
pull_request: null
id: 824,034,678
labels: []
url: https://api.github.com/repos/huggingface/datasets/issues/2003
comments: [ "", "" ]
repository_url: https://api.github.com/repos/huggingface/datasets
author_association: NONE
body:
In this code segment, we can see that some messages are being printed to `stdout`: https://github.com/huggingface/datasets/blob/7e60bb509b595e8edc60a87f32b2bacfc065d607/src/datasets/builder.py#L545-L554 According to the comment, this is done intentionally, but I don't really understand why we don't log it at a higher level or print it directly to `stderr`. In my opinion, this kind of message should never be printed to stdout. At the very least, some configuration flag should be provided to explicitly prevent the package from contaminating stdout.
updated_at: 2023-07-25T16:35:21Z
html_url: https://github.com/huggingface/datasets/issues/2003
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2003/reactions" }
is_pull_request: false
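
What the issue asks for, in miniature: status messages routed through a logger writing to `sys.stderr` leave `stdout` free for the program's actual output. A small sketch of the distinction, independent of the builder code in question:

```python
import logging
import sys

# A logger whose handler writes to stderr, so pipelines that capture
# stdout stay clean.
logger = logging.getLogger("my_package")
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter("%(levelname)s - %(message)s"))
logger.addHandler(handler)
logger.setLevel(logging.INFO)

logger.info("Downloading and preparing dataset...")  # goes to stderr
print("actual program output")                       # stdout stays clean
```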

**#2002: MOROCO**

comments_url: https://api.github.com/repos/huggingface/datasets/issues/2002/comments
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/2002/timeline
closed_at: 2021-03-19T09:52:06Z
performed_via_github_app: null
state_reason: null
node_id: MDExOlB1bGxSZXF1ZXN0NTg2MjgwNzE3
state: closed
assignees: []
draft: false
number: 2,002
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/6823177?v=4", "events_url": "https://api.github.com/users/MihaelaGaman/events{/privacy}", "followers_url": "https://api.github.com/users/MihaelaGaman/followers", "following_url": "https://api.github.com/users/MihaelaGaman/following{/other_user}", "gists_url": "https://api.github.com/users/MihaelaGaman/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/MihaelaGaman", "id": 6823177, "login": "MihaelaGaman", "node_id": "MDQ6VXNlcjY4MjMxNzc=", "organizations_url": "https://api.github.com/users/MihaelaGaman/orgs", "received_events_url": "https://api.github.com/users/MihaelaGaman/received_events", "repos_url": "https://api.github.com/users/MihaelaGaman/repos", "site_admin": false, "starred_url": "https://api.github.com/users/MihaelaGaman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MihaelaGaman/subscriptions", "type": "User", "url": "https://api.github.com/users/MihaelaGaman" }
title: MOROCO
events_url: https://api.github.com/repos/huggingface/datasets/issues/2002/events
milestone: null
labels_url: https://api.github.com/repos/huggingface/datasets/issues/2002/labels{/name}
created_at: 2021-03-07T16:22:17Z
active_lock_reason: null
locked: false
assignee: null
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/2002.diff", "html_url": "https://github.com/huggingface/datasets/pull/2002", "merged_at": "2021-03-19T09:52:06Z", "patch_url": "https://github.com/huggingface/datasets/pull/2002.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2002" }
id: 823,955,744
labels: []
url: https://api.github.com/repos/huggingface/datasets/issues/2002
comments: [ "", "" ]
repository_url: https://api.github.com/repos/huggingface/datasets
author_association: CONTRIBUTOR
body: Add MOROCO to huggingface datasets.
updated_at: 2021-03-19T09:52:06Z
html_url: https://github.com/huggingface/datasets/pull/2002
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2002/reactions" }
is_pull_request: true

**#2001: Empty evidence document ("provenance") in KILT ELI5 dataset**

comments_url: https://api.github.com/repos/huggingface/datasets/issues/2001/comments
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/2001/timeline
closed_at: 2021-03-17T05:51:01Z
performed_via_github_app: null
state_reason: completed
node_id: MDU6SXNzdWU4MjM5NDY3MDY=
state: closed
assignees: []
draft: null
number: 2,001
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/16605764?v=4", "events_url": "https://api.github.com/users/donggyukimc/events{/privacy}", "followers_url": "https://api.github.com/users/donggyukimc/followers", "following_url": "https://api.github.com/users/donggyukimc/following{/other_user}", "gists_url": "https://api.github.com/users/donggyukimc/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/donggyukimc", "id": 16605764, "login": "donggyukimc", "node_id": "MDQ6VXNlcjE2NjA1NzY0", "organizations_url": "https://api.github.com/users/donggyukimc/orgs", "received_events_url": "https://api.github.com/users/donggyukimc/received_events", "repos_url": "https://api.github.com/users/donggyukimc/repos", "site_admin": false, "starred_url": "https://api.github.com/users/donggyukimc/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/donggyukimc/subscriptions", "type": "User", "url": "https://api.github.com/users/donggyukimc" }
title: Empty evidence document ("provenance") in KILT ELI5 dataset
events_url: https://api.github.com/repos/huggingface/datasets/issues/2001/events
milestone: null
labels_url: https://api.github.com/repos/huggingface/datasets/issues/2001/labels{/name}
created_at: 2021-03-07T15:41:35Z
active_lock_reason: null
locked: false
assignee: null
pull_request: null
id: 823,946,706
labels: []
url: https://api.github.com/repos/huggingface/datasets/issues/2001
comments: [ "", "" ]
repository_url: https://api.github.com/repos/huggingface/datasets
author_association: NONE
body:
In the original KILT benchmark (https://github.com/facebookresearch/KILT), every sample has its evidence document (i.e. a Wikipedia page id) for prediction. For example, a sample in the ELI5 dataset has a format that includes provenance (= the evidence document), like this:

```
{"id": "1kiwfx", "input": "In Trading Places (1983, Akroyd/Murphy) how does the scheme at the end of the movie work? Why would buying a lot of OJ at a high price ruin the Duke Brothers?", "output": [{"answer": "I feel so old. People have been askinbg what happened at the end of this movie for what must be the last 15 years of my life. It never stops. Every year/month/fortnight, I see someone asking what happened, and someone explaining. Andf it will keep on happening, until I am 90yrs old, in a home, with nothing but the Internet and my bladder to keep me going. And there it will be: \"what happens at the end of Trading Places?\""}, {"provenance": [{"wikipedia_id": "242855", "title": "Futures contract", "section": "Section::::Abstract.", "start_paragraph_id": 1, "start_character": 14, "end_paragraph_id": 1, "end_character": 612, "bleu_score": 0.9232808519770748}]}], "meta": {"partial_evidence": [{"wikipedia_id": "520990", "title": "Trading Places", "section": "Section::::Plot.\n", "start_paragraph_id": 7, "end_paragraph_id": 7, "meta": {"evidence_span": ["On television, they learn that Clarence Beeks is transporting a secret USDA report on orange crop forecasts.", "On television, they learn that Clarence Beeks is transporting a secret USDA report on orange crop forecasts. Winthorpe and Valentine recall large payments made to Beeks by the Dukes and realize that the Dukes plan to obtain the report to corner the market on frozen orange juice.", "Winthorpe and Valentine recall large payments made to Beeks by the Dukes and realize that the Dukes plan to obtain the report to corner the market on frozen orange juice."]}}]}}
```

However, the KILT ELI5 dataset from the huggingface datasets library only contains an empty provenance list:

```
{'id': '1oy5tc', 'input': 'in football whats the point of wasting the first two plays with a rush - up the middle - not regular rush plays i get those', 'meta': {'left_context': '', 'mention': '', 'obj_surface': [], 'partial_evidence': [], 'right_context': '', 'sub_surface': [], 'subj_aliases': [], 'template_questions': []}, 'output': [{'answer': 'In most cases the O-Line is supposed to make a hole for the running back to go through. If you run too many plays to the outside/throws the defense will catch on.\n\nAlso, 2 5 yard plays gets you a new set of downs.', 'meta': {'score': 2}, 'provenance': []}, {'answer': "I you don't like those type of plays, watch CFL. We only get 3 downs so you can't afford to waste one. Lots more passing.", 'meta': {'score': 2}, 'provenance': []}]}
```

Should I perform some other procedure to obtain the evidence documents?

updated_at: 2022-12-19T19:25:14Z
html_url: https://github.com/huggingface/datasets/issues/2001
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2001/reactions" }
is_pull_request: false
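
A hedged way to quantify the observation, assuming KILT ELI5 is exposed as the `eli5` config of the `kilt_tasks` loader and that `output` is a list of dicts with a `provenance` field, as the printed sample suggests. This only measures how many rows carry provenance; it does not recover the missing evidence documents:

```python
from datasets import load_dataset

# Assumption: KILT ELI5 is the "eli5" config of "kilt_tasks".
eli5 = load_dataset("kilt_tasks", "eli5", split="validation")

with_provenance = sum(
    1
    for row in eli5
    if any(len(out["provenance"]) > 0 for out in row["output"])
)
print(f"{with_provenance}/{len(eli5)} rows have at least one provenance entry")
```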

**#2000: Windows Permission Error (most recent version of datasets)**

comments_url: https://api.github.com/repos/huggingface/datasets/issues/2000/comments
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/2000/timeline
closed_at: 2021-03-09T12:42:57Z
performed_via_github_app: null
state_reason: completed
node_id: MDU6SXNzdWU4MjM4OTk5MTA=
state: closed
assignees: []
draft: null
number: 2,000
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/73881148?v=4", "events_url": "https://api.github.com/users/itsLuisa/events{/privacy}", "followers_url": "https://api.github.com/users/itsLuisa/followers", "following_url": "https://api.github.com/users/itsLuisa/following{/other_user}", "gists_url": "https://api.github.com/users/itsLuisa/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/itsLuisa", "id": 73881148, "login": "itsLuisa", "node_id": "MDQ6VXNlcjczODgxMTQ4", "organizations_url": "https://api.github.com/users/itsLuisa/orgs", "received_events_url": "https://api.github.com/users/itsLuisa/received_events", "repos_url": "https://api.github.com/users/itsLuisa/repos", "site_admin": false, "starred_url": "https://api.github.com/users/itsLuisa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/itsLuisa/subscriptions", "type": "User", "url": "https://api.github.com/users/itsLuisa" }
title: Windows Permission Error (most recent version of datasets)
events_url: https://api.github.com/repos/huggingface/datasets/issues/2000/events
milestone: null
labels_url: https://api.github.com/repos/huggingface/datasets/issues/2000/labels{/name}
created_at: 2021-03-07T11:55:28Z
active_lock_reason: null
locked: false
assignee: null
pull_request: null
id: 823,899,910
labels: []
url: https://api.github.com/repos/huggingface/datasets/issues/2000
comments: [ "", "" ]
repository_url: https://api.github.com/repos/huggingface/datasets
author_association: NONE
body:
Hi everyone,

Can anyone help me with why the dataset loading script below raises a Windows Permission Error? I stuck quite closely to https://github.com/huggingface/datasets/blob/master/datasets/conll2003/conll2003.py, only I want to load the data from three local three-column tsv files (`id\ttokens\tpos_tags\n`). I am using the most recent version of datasets. Thank you in advance!

Luisa

My script:

```python
import datasets
import csv

logger = datasets.logging.get_logger(__name__)


class SampleConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super(SampleConfig, self).__init__(**kwargs)


class Sample(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        SampleConfig(name="conll2003", version=datasets.Version("1.0.0"), description="Conll2003 dataset"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description="Dataset with words and their POS-Tags",
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "''", ",", "-LRB-", "-RRB-", ".", ":", "CC", "CD", "DT", "EX", "FW",
                                "HYPH", "IN", "JJ", "JJR", "JJS", "MD", "NN", "NNP", "NNPS", "NNS",
                                "PDT", "POS", "PRP", "PRP$", "RB", "RBR", "RBS", "RP", "TO", "UH",
                                "VB", "VBD", "VBG", "VBN", "VBP", "VBZ", "WDT", "WP", "WRB", "``",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://catalog.ldc.upenn.edu/LDC2011T03",
            citation="Weischedel, Ralph, et al. OntoNotes Release 4.0 LDC2011T03. Web Download. Philadelphia: Linguistic Data Consortium, 2011.",
        )

    def _split_generators(self, dl_manager):
        loaded_files = dl_manager.download_and_extract(self.config.data_files)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": loaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": loaded_files["test"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": loaded_files["val"]}),
        ]

    def _generate_examples(self, filepath):
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="cp1252") as f:
            data = csv.reader(f, delimiter="\t")
            ids = list()
            tokens = list()
            pos_tags = list()
            for id_, line in enumerate(data):
                # print(line)
                if len(line) == 1:
                    if tokens:
                        yield id_, {"id": ids, "tokens": tokens, "pos_tags": pos_tags}
                        ids = list()
                        tokens = list()
                        pos_tags = list()
                else:
                    ids.append(line[0])
                    tokens.append(line[1])
                    pos_tags.append(line[2])
            # last example
            yield id_, {"id": ids, "tokens": tokens, "pos_tags": pos_tags}


def main():
    dataset = datasets.load_dataset(
        "data_loading.py",
        data_files={"train": "train.tsv", "test": "test.tsv", "val": "val.tsv"},
    )
    # print(dataset)


if __name__ == "__main__":
    main()
```

updated_at: 2021-03-09T12:42:57Z
html_url: https://github.com/huggingface/datasets/issues/2000
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2000/reactions" }
is_pull_request: false

**#1999: Add FashionMNIST dataset**

comments_url: https://api.github.com/repos/huggingface/datasets/issues/1999/comments
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/1999/timeline
closed_at: 2021-03-09T09:52:11Z
performed_via_github_app: null
state_reason: null
node_id: MDExOlB1bGxSZXF1ZXN0NTg2MTM5ODMy
state: closed
assignees: []
draft: false
number: 1,999
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/29076344?v=4", "events_url": "https://api.github.com/users/gchhablani/events{/privacy}", "followers_url": "https://api.github.com/users/gchhablani/followers", "following_url": "https://api.github.com/users/gchhablani/following{/other_user}", "gists_url": "https://api.github.com/users/gchhablani/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gchhablani", "id": 29076344, "login": "gchhablani", "node_id": "MDQ6VXNlcjI5MDc2MzQ0", "organizations_url": "https://api.github.com/users/gchhablani/orgs", "received_events_url": "https://api.github.com/users/gchhablani/received_events", "repos_url": "https://api.github.com/users/gchhablani/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gchhablani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gchhablani/subscriptions", "type": "User", "url": "https://api.github.com/users/gchhablani" }
title: Add FashionMNIST dataset
events_url: https://api.github.com/repos/huggingface/datasets/issues/1999/events
milestone: null
labels_url: https://api.github.com/repos/huggingface/datasets/issues/1999/labels{/name}
created_at: 2021-03-06T21:36:57Z
active_lock_reason: null
locked: false
assignee: null
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/1999.diff", "html_url": "https://github.com/huggingface/datasets/pull/1999", "merged_at": "2021-03-09T09:52:11Z", "patch_url": "https://github.com/huggingface/datasets/pull/1999.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1999" }
id: 823,753,591
labels: []
url: https://api.github.com/repos/huggingface/datasets/issues/1999
comments: [ "", "" ]
repository_url: https://api.github.com/repos/huggingface/datasets
author_association: CONTRIBUTOR
body: This PR adds the [FashionMNIST](https://github.com/zalandoresearch/fashion-mnist) dataset.
updated_at: 2021-03-09T09:52:11Z
html_url: https://github.com/huggingface/datasets/pull/1999
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1999/reactions" }
is_pull_request: true

**#1998: Add -DOCSTART- note to dataset card of conll-like datasets**

comments_url: https://api.github.com/repos/huggingface/datasets/issues/1998/comments
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/1998/timeline
closed_at: 2021-03-11T02:20:07Z
performed_via_github_app: null
state_reason: null
node_id: MDExOlB1bGxSZXF1ZXN0NTg2MTE4NTQ4
state: closed
assignees: []
draft: false
number: 1,998
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
title: Add -DOCSTART- note to dataset card of conll-like datasets
events_url: https://api.github.com/repos/huggingface/datasets/issues/1998/events
milestone: null
labels_url: https://api.github.com/repos/huggingface/datasets/issues/1998/labels{/name}
created_at: 2021-03-06T19:08:29Z
active_lock_reason: null
locked: false
assignee: null
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/1998.diff", "html_url": "https://github.com/huggingface/datasets/pull/1998", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/1998.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1998" }
id: 823,723,960
labels: []
url: https://api.github.com/repos/huggingface/datasets/issues/1998
comments: [ "", "" ]
repository_url: https://api.github.com/repos/huggingface/datasets
author_association: CONTRIBUTOR
body: Closes #1983
updated_at: 2021-03-11T02:20:07Z
html_url: https://github.com/huggingface/datasets/pull/1998
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1998/reactions" }
is_pull_request: true

**#1997: from datasets import MoleculeDataset, GEOMDataset**

comments_url: https://api.github.com/repos/huggingface/datasets/issues/1997/comments
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/1997/timeline
closed_at: 2021-03-06T16:13:26Z
performed_via_github_app: null
state_reason: completed
node_id: MDU6SXNzdWU4MjM2Nzk0NjU=
state: closed
assignees: []
draft: null
number: 1,997
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/5087210?v=4", "events_url": "https://api.github.com/users/futianfan/events{/privacy}", "followers_url": "https://api.github.com/users/futianfan/followers", "following_url": "https://api.github.com/users/futianfan/following{/other_user}", "gists_url": "https://api.github.com/users/futianfan/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/futianfan", "id": 5087210, "login": "futianfan", "node_id": "MDQ6VXNlcjUwODcyMTA=", "organizations_url": "https://api.github.com/users/futianfan/orgs", "received_events_url": "https://api.github.com/users/futianfan/received_events", "repos_url": "https://api.github.com/users/futianfan/repos", "site_admin": false, "starred_url": "https://api.github.com/users/futianfan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/futianfan/subscriptions", "type": "User", "url": "https://api.github.com/users/futianfan" }
title: from datasets import MoleculeDataset, GEOMDataset
events_url: https://api.github.com/repos/huggingface/datasets/issues/1997/events
milestone: null
labels_url: https://api.github.com/repos/huggingface/datasets/issues/1997/labels{/name}
created_at: 2021-03-06T15:50:19Z
active_lock_reason: null
locked: false
assignee: null
pull_request: null
id: 823,679,465
labels:
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
https://api.github.com/repos/huggingface/datasets/issues/1997
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
I hit `ImportError: cannot import name 'MoleculeDataset' from 'datasets'`. Has anyone run into a similar issue? Thanks!
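For context, the `datasets` library exposes generic loading utilities rather than per-dataset classes, so names like `MoleculeDataset` or `GEOMDataset` likely come from another codebase. A minimal sketch of the import the library does support (the dataset name below is only illustrative):

```python
from datasets import load_dataset

# The datasets library has no MoleculeDataset/GEOMDataset classes;
# datasets are loaded by name through load_dataset instead.
# "glue"/"mrpc" is just an illustrative choice here.
dataset = load_dataset("glue", "mrpc")
print(dataset)
```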
2021-03-06T16:13:26Z
https://github.com/huggingface/datasets/issues/1997
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1997/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1996/comments
https://api.github.com/repos/huggingface/datasets/issues/1996/timeline
2022-10-05T13:24:26Z
null
completed
MDU6SXNzdWU4MjM1NzM0MTA=
closed
[]
null
1,996
{ "avatar_url": "https://avatars.githubusercontent.com/u/6879673?v=4", "events_url": "https://api.github.com/users/elgeish/events{/privacy}", "followers_url": "https://api.github.com/users/elgeish/followers", "following_url": "https://api.github.com/users/elgeish/following{/other_user}", "gists_url": "https://api.github.com/users/elgeish/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/elgeish", "id": 6879673, "login": "elgeish", "node_id": "MDQ6VXNlcjY4Nzk2NzM=", "organizations_url": "https://api.github.com/users/elgeish/orgs", "received_events_url": "https://api.github.com/users/elgeish/received_events", "repos_url": "https://api.github.com/users/elgeish/repos", "site_admin": false, "starred_url": "https://api.github.com/users/elgeish/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/elgeish/subscriptions", "type": "User", "url": "https://api.github.com/users/elgeish" }
Error when exploring `arabic_speech_corpus`
https://api.github.com/repos/huggingface/datasets/issues/1996/events
null
https://api.github.com/repos/huggingface/datasets/issues/1996/labels{/name}
2021-03-06T05:55:20Z
null
false
null
null
823,573,410
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" }, { "color": "94203D", "default": false, "description": "", "id": 2107841032, "name": "nlp-viewer", "node_id": "MDU6TGFiZWwyMTA3ODQxMDMy", "url": "https://api.github.com/repos/huggingface/datasets/labels/nlp-viewer" }, { "color": "d93f0b", "default": false, "description": "", "id": 2725241052, "name": "speech", "node_id": "MDU6TGFiZWwyNzI1MjQxMDUy", "url": "https://api.github.com/repos/huggingface/datasets/labels/speech" } ]
https://api.github.com/repos/huggingface/datasets/issues/1996
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Navigate to https://huggingface.co/datasets/viewer/?dataset=arabic_speech_corpus Error: ``` ImportError: To be able to use this dataset, you need to install the following dependencies['soundfile'] using 'pip install soundfile' for instance' Traceback: File "/home/sasha/.local/share/virtualenvs/lib-ogGKnCK_/lib/python3.7/site-packages/streamlit/script_runner.py", line 332, in _run_script exec(code, module.__dict__) File "/home/sasha/nlp-viewer/run.py", line 233, in <module> configs = get_confs(option) File "/home/sasha/.local/share/virtualenvs/lib-ogGKnCK_/lib/python3.7/site-packages/streamlit/caching.py", line 604, in wrapped_func return get_or_create_cached_value() File "/home/sasha/.local/share/virtualenvs/lib-ogGKnCK_/lib/python3.7/site-packages/streamlit/caching.py", line 588, in get_or_create_cached_value return_value = func(*args, **kwargs) File "/home/sasha/nlp-viewer/run.py", line 145, in get_confs module_path = nlp.load.prepare_module(path, dataset=True File "/home/sasha/.local/share/virtualenvs/lib-ogGKnCK_/lib/python3.7/site-packages/datasets/load.py", line 342, in prepare_module f"To be able to use this {module_type}, you need to install the following dependencies" ```
2022-10-05T13:24:26Z
https://github.com/huggingface/datasets/issues/1996
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1996/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1995/comments
https://api.github.com/repos/huggingface/datasets/issues/1995/timeline
2021-03-05T08:58:59Z
null
null
MDExOlB1bGxSZXF1ZXN0NTg1NDI5NTg0
closed
[]
false
1,995
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[Timit_asr] Make sure not only the first sample is used
https://api.github.com/repos/huggingface/datasets/issues/1995/events
null
https://api.github.com/repos/huggingface/datasets/issues/1995/labels{/name}
2021-03-05T08:42:51Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1995.diff", "html_url": "https://github.com/huggingface/datasets/pull/1995", "merged_at": "2021-03-05T08:58:59Z", "patch_url": "https://github.com/huggingface/datasets/pull/1995.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1995" }
822,878,431
[]
https://api.github.com/repos/huggingface/datasets/issues/1995
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
When playing around with timit I noticed that only the first sample is used for all indices. I corrected this typo so that the dataset is correctly loaded.
2021-06-30T06:25:53Z
https://github.com/huggingface/datasets/pull/1995
{ "+1": 4, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 4, "url": "https://api.github.com/repos/huggingface/datasets/issues/1995/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1994/comments
https://api.github.com/repos/huggingface/datasets/issues/1994/timeline
null
null
null
MDU6SXNzdWU4MjI4NzEyMzg=
open
[]
null
1,994
{ "avatar_url": "https://avatars.githubusercontent.com/u/79165106?v=4", "events_url": "https://api.github.com/users/dorost1234/events{/privacy}", "followers_url": "https://api.github.com/users/dorost1234/followers", "following_url": "https://api.github.com/users/dorost1234/following{/other_user}", "gists_url": "https://api.github.com/users/dorost1234/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dorost1234", "id": 79165106, "login": "dorost1234", "node_id": "MDQ6VXNlcjc5MTY1MTA2", "organizations_url": "https://api.github.com/users/dorost1234/orgs", "received_events_url": "https://api.github.com/users/dorost1234/received_events", "repos_url": "https://api.github.com/users/dorost1234/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dorost1234/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dorost1234/subscriptions", "type": "User", "url": "https://api.github.com/users/dorost1234" }
not being able to get wikipedia es language
https://api.github.com/repos/huggingface/datasets/issues/1994/events
null
https://api.github.com/repos/huggingface/datasets/issues/1994/labels{/name}
2021-03-05T08:31:48Z
null
false
null
null
822,871,238
[]
https://api.github.com/repos/huggingface/datasets/issues/1994
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi, I am trying to run code with the wikipedia dataset (config 20200501.es) and am getting: Traceback (most recent call last): File "run_mlm_t5.py", line 608, in <module> main() File "run_mlm_t5.py", line 359, in main datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name) File "/dara/libs/anaconda3/envs/success432/lib/python3.7/site-packages/datasets-1.2.1-py3.7.egg/datasets/load.py", line 612, in load_dataset ignore_verifications=ignore_verifications, File "/dara/libs/anaconda3/envs/success432/lib/python3.7/site-packages/datasets-1.2.1-py3.7.egg/datasets/builder.py", line 527, in download_and_prepare dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/dara/libs/anaconda3/envs/success432/lib/python3.7/site-packages/datasets-1.2.1-py3.7.egg/datasets/builder.py", line 1050, in _download_and_prepare "\n\t`{}`".format(usage_example) datasets.builder.MissingBeamOptions: Trying to generate a dataset using Apache Beam, yet no Beam Runner or PipelineOptions() has been provided in `load_dataset` or in the builder arguments. For big datasets it has to run on large-scale data processing tools like Dataflow, Spark, etc. More information about Apache Beam runners at https://beam.apache.org/documentation/runners/capability-matrix/ If you really want to run it locally because you feel like the Dataset is small enough, you can use the local beam runner called `DirectRunner` (you may run out of memory). Example of usage: `load_dataset('wikipedia', '20200501.es', beam_runner='DirectRunner')` Thanks @lhoestq for any suggestion/help.
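As the error message itself suggests, one way forward (assuming apache-beam and the Wikipedia parsing dependencies are installed, and the dump is small enough for local processing) is to pass a Beam runner explicitly; a minimal sketch:

```python
from datasets import load_dataset

# DirectRunner executes the Beam pipeline locally; as the error message
# warns, this may run out of memory on large Wikipedia dumps.
wiki_es = load_dataset("wikipedia", "20200501.es", beam_runner="DirectRunner")
```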
2021-03-11T20:46:21Z
https://github.com/huggingface/datasets/issues/1994
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1994/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1993/comments
https://api.github.com/repos/huggingface/datasets/issues/1993/timeline
2021-03-22T04:05:50Z
null
completed
MDU6SXNzdWU4MjI3NTgzODc=
closed
[]
null
1,993
{ "avatar_url": "https://avatars.githubusercontent.com/u/16892570?v=4", "events_url": "https://api.github.com/users/shamanez/events{/privacy}", "followers_url": "https://api.github.com/users/shamanez/followers", "following_url": "https://api.github.com/users/shamanez/following{/other_user}", "gists_url": "https://api.github.com/users/shamanez/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/shamanez", "id": 16892570, "login": "shamanez", "node_id": "MDQ6VXNlcjE2ODkyNTcw", "organizations_url": "https://api.github.com/users/shamanez/orgs", "received_events_url": "https://api.github.com/users/shamanez/received_events", "repos_url": "https://api.github.com/users/shamanez/repos", "site_admin": false, "starred_url": "https://api.github.com/users/shamanez/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/shamanez/subscriptions", "type": "User", "url": "https://api.github.com/users/shamanez" }
How to load a dataset with load_from_disk and save it again after doing transformations without changing the original?
https://api.github.com/repos/huggingface/datasets/issues/1993/events
null
https://api.github.com/repos/huggingface/datasets/issues/1993/labels{/name}
2021-03-05T05:25:50Z
null
false
null
null
822,758,387
[]
https://api.github.com/repos/huggingface/datasets/issues/1993
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
I am using the latest datasets library. In my work, I first use **load_from_disk** to load a dataset that contains 3.8 GB of information. Then, during my training process, I update that dataset object, add new elements, and save it in a different place. When I save the dataset with **save_to_disk**, the original dataset already on disk also gets updated. I do not want to update it. How can I prevent this?
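A sketch of the intended workflow, assuming the transformed dataset is written to a different directory than the one it was loaded from (paths and the `text` column are placeholders):

```python
from datasets import load_from_disk

original = load_from_disk("/path/to/original_dataset")  # placeholder path

# map() writes its output to new cache files and returns a new Dataset;
# the directory the original was loaded from is left untouched by this call.
updated = original.map(lambda example: {"text": example["text"].lower()})

# Save to a *different* directory so the original copy is never overwritten.
updated.save_to_disk("/path/to/updated_dataset")        # placeholder path
```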
2021-03-22T04:05:50Z
https://github.com/huggingface/datasets/issues/1993
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1993/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1992/comments
https://api.github.com/repos/huggingface/datasets/issues/1992/timeline
null
null
null
MDU6SXNzdWU4MjI2NzIyMzg=
open
[]
null
1,992
{ "avatar_url": "https://avatars.githubusercontent.com/u/29157715?v=4", "events_url": "https://api.github.com/users/hwijeen/events{/privacy}", "followers_url": "https://api.github.com/users/hwijeen/followers", "following_url": "https://api.github.com/users/hwijeen/following{/other_user}", "gists_url": "https://api.github.com/users/hwijeen/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/hwijeen", "id": 29157715, "login": "hwijeen", "node_id": "MDQ6VXNlcjI5MTU3NzE1", "organizations_url": "https://api.github.com/users/hwijeen/orgs", "received_events_url": "https://api.github.com/users/hwijeen/received_events", "repos_url": "https://api.github.com/users/hwijeen/repos", "site_admin": false, "starred_url": "https://api.github.com/users/hwijeen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hwijeen/subscriptions", "type": "User", "url": "https://api.github.com/users/hwijeen" }
`datasets.map` multi processing much slower than single processing
https://api.github.com/repos/huggingface/datasets/issues/1992/events
null
https://api.github.com/repos/huggingface/datasets/issues/1992/labels{/name}
2021-03-05T02:10:02Z
null
false
null
null
822,672,238
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
https://api.github.com/repos/huggingface/datasets/issues/1992
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi, thank you for the great library. I've been using datasets to pretrain language models, and it often involves datasets as large as ~70G. My data preparation has roughly two steps: `load_dataset`, which splits corpora into a table of sentences, and `map`, which converts a sentence into a list of integers using a tokenizer. I noticed that the `map` function with `num_proc=mp.cpu_count() // 2` takes more than 20 hours to finish the job, whereas `num_proc=1` gets the job done in about 5 hours. The machine I used has 40 cores, with 126G of RAM. There were no other jobs when the `map` function was running. What could be the reason? I would be happy to provide any information necessary to spot the reason. P.S. I was experiencing the imbalance issue mentioned [here](https://github.com/huggingface/datasets/issues/610#issuecomment-705177036) when I was using multiprocessing. P.S.2: When I run `map` with `num_proc=1`, I see one tqdm bar but all the cores are working. When `num_proc=20`, only 20 cores work. ![Screen Shot 2021-03-05 at 11 04 59](https://user-images.githubusercontent.com/29157715/110056895-ef6cf000-7da2-11eb-8307-6698e9fb1ad4.png)
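For reference, a sketch of the two configurations being compared (the corpus file, model name, and column name are placeholders, not the reporter's actual setup):

```python
import multiprocessing as mp

from datasets import load_dataset
from transformers import AutoTokenizer

# Placeholders: swap in the real corpus and tokenizer.
tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")
dataset = load_dataset("text", data_files={"train": "corpus.txt"})["train"]

def tokenize(batch):
    # Batched map: each call receives a dict of lists, one entry per column.
    return tokenizer(batch["text"], truncation=True, max_length=512)

# Single-process run (reported above to finish in ~5 hours):
ds_single = dataset.map(tokenize, batched=True, num_proc=1)

# Multi-process run (reported above to take 20+ hours):
ds_multi = dataset.map(tokenize, batched=True, num_proc=mp.cpu_count() // 2)
```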
2023-06-08T12:31:55Z
https://github.com/huggingface/datasets/issues/1992
{ "+1": 4, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 4, "url": "https://api.github.com/repos/huggingface/datasets/issues/1992/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1991/comments
https://api.github.com/repos/huggingface/datasets/issues/1991/timeline
2021-03-17T10:37:39Z
null
null
MDExOlB1bGxSZXF1ZXN0NTg1MTYwNDkx
closed
[]
false
1,991
{ "avatar_url": "https://avatars.githubusercontent.com/u/21319243?v=4", "events_url": "https://api.github.com/users/ZihanWangKi/events{/privacy}", "followers_url": "https://api.github.com/users/ZihanWangKi/followers", "following_url": "https://api.github.com/users/ZihanWangKi/following{/other_user}", "gists_url": "https://api.github.com/users/ZihanWangKi/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ZihanWangKi", "id": 21319243, "login": "ZihanWangKi", "node_id": "MDQ6VXNlcjIxMzE5MjQz", "organizations_url": "https://api.github.com/users/ZihanWangKi/orgs", "received_events_url": "https://api.github.com/users/ZihanWangKi/received_events", "repos_url": "https://api.github.com/users/ZihanWangKi/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ZihanWangKi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ZihanWangKi/subscriptions", "type": "User", "url": "https://api.github.com/users/ZihanWangKi" }
Adding the conllpp dataset
https://api.github.com/repos/huggingface/datasets/issues/1991/events
null
https://api.github.com/repos/huggingface/datasets/issues/1991/labels{/name}
2021-03-04T22:19:43Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1991.diff", "html_url": "https://github.com/huggingface/datasets/pull/1991", "merged_at": "2021-03-17T10:37:39Z", "patch_url": "https://github.com/huggingface/datasets/pull/1991.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1991" }
822,554,473
[]
https://api.github.com/repos/huggingface/datasets/issues/1991
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Adding the conllpp dataset; this is a revision of https://github.com/huggingface/datasets/pull/1910.
2021-03-17T10:37:39Z
https://github.com/huggingface/datasets/pull/1991
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1991/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1990/comments
https://api.github.com/repos/huggingface/datasets/issues/1990/timeline
2021-08-04T18:04:25Z
null
completed
MDU6SXNzdWU4MjIzODQ1MDI=
closed
[]
null
1,990
{ "avatar_url": "https://avatars.githubusercontent.com/u/79165106?v=4", "events_url": "https://api.github.com/users/dorost1234/events{/privacy}", "followers_url": "https://api.github.com/users/dorost1234/followers", "following_url": "https://api.github.com/users/dorost1234/following{/other_user}", "gists_url": "https://api.github.com/users/dorost1234/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dorost1234", "id": 79165106, "login": "dorost1234", "node_id": "MDQ6VXNlcjc5MTY1MTA2", "organizations_url": "https://api.github.com/users/dorost1234/orgs", "received_events_url": "https://api.github.com/users/dorost1234/received_events", "repos_url": "https://api.github.com/users/dorost1234/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dorost1234/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dorost1234/subscriptions", "type": "User", "url": "https://api.github.com/users/dorost1234" }
OSError: Memory mapping file failed: Cannot allocate memory
https://api.github.com/repos/huggingface/datasets/issues/1990/events
null
https://api.github.com/repos/huggingface/datasets/issues/1990/labels{/name}
2021-03-04T18:21:58Z
null
false
null
null
822,384,502
[]
https://api.github.com/repos/huggingface/datasets/issues/1990
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi, I am trying to run code with a wikipedia dataset; here is the command to reproduce the error. You can find the code for run_mlm.py in the huggingface repo here: https://github.com/huggingface/transformers/blob/v4.3.2/examples/language-modeling/run_mlm.py ``` python run_mlm.py --model_name_or_path bert-base-multilingual-cased --dataset_name wikipedia --dataset_config_name 20200501.en --do_train --do_eval --output_dir /dara/test --max_seq_length 128 ``` I am using transformers version 4.3.2. But I get a memory error using this dataset; is there a way I could save memory with the datasets library and the wikipedia dataset? Specifically, I need to train a model with multiple wikipedia datasets concatenated. Thank you very much @lhoestq for your help and suggestions: ``` File "run_mlm.py", line 441, in <module> main() File "run_mlm.py", line 233, in main split=f"train[{data_args.validation_split_percentage}%:]", File "/dara/libs/anaconda3/envs/code/lib/python3.7/site-packages/datasets-1.3.0-py3.7.egg/datasets/load.py", line 750, in load_dataset ds = builder_instance.as_dataset(split=split, ignore_verifications=ignore_verifications, in_memory=keep_in_memory) File "/dara/libs/anaconda3/envs/code/lib/python3.7/site-packages/datasets-1.3.0-py3.7.egg/datasets/builder.py", line 740, in as_dataset map_tuple=True, File "/dara/libs/anaconda3/envs/code/lib/python3.7/site-packages/datasets-1.3.0-py3.7.egg/datasets/utils/py_utils.py", line 225, in map_nested return function(data_struct) File "/dara/libs/anaconda3/envs/code/lib/python3.7/site-packages/datasets-1.3.0-py3.7.egg/datasets/builder.py", line 757, in _build_single_dataset in_memory=in_memory, File "/dara/libs/anaconda3/envs/code/lib/python3.7/site-packages/datasets-1.3.0-py3.7.egg/datasets/builder.py", line 829, in _as_dataset in_memory=in_memory, File "/dara/libs/anaconda3/envs/code/lib/python3.7/site-packages/datasets-1.3.0-py3.7.egg/datasets/arrow_reader.py", line 215, in read return self.read_files(files=files, original_instructions=instructions, in_memory=in_memory) File "/dara/libs/anaconda3/envs/code/lib/python3.7/site-packages/datasets-1.3.0-py3.7.egg/datasets/arrow_reader.py", line 236, in read_files pa_table = self._read_files(files, in_memory=in_memory) File "/dara/libs/anaconda3/envs/code/lib/python3.7/site-packages/datasets-1.3.0-py3.7.egg/datasets/arrow_reader.py", line 171, in _read_files pa_table: pa.Table = self._get_dataset_from_filename(f_dict, in_memory=in_memory) File "/dara/libs/anaconda3/envs/code/lib/python3.7/site-packages/datasets-1.3.0-py3.7.egg/datasets/arrow_reader.py", line 302, in _get_dataset_from_filename pa_table = ArrowReader.read_table(filename, in_memory=in_memory) File "/dara/libs/anaconda3/envs/code/lib/python3.7/site-packages/datasets-1.3.0-py3.7.egg/datasets/arrow_reader.py", line 322, in read_table stream = stream_from(filename) File "pyarrow/io.pxi", line 782, in pyarrow.lib.memory_map File "pyarrow/io.pxi", line 743, in pyarrow.lib.MemoryMappedFile._open File "pyarrow/error.pxi", line 122, in pyarrow.lib.pyarrow_internal_check_status File "pyarrow/error.pxi", line 99, in pyarrow.lib.check_status OSError: Memory mapping file failed: Cannot allocate memory ```
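One knob worth checking here (an assumption about the cause, not a confirmed fix) is whether the dataset is being copied into RAM instead of memory-mapped; the traceback goes through `in_memory=keep_in_memory`, and the mmap itself still needs enough virtual address space from the OS:

```python
from datasets import load_dataset

# keep_in_memory=False asks datasets to memory-map the Arrow files rather
# than loading them into RAM; if the mmap call itself still fails, the OS
# virtual-memory limits (e.g. overcommit settings) may be the bottleneck.
wiki = load_dataset("wikipedia", "20200501.en", keep_in_memory=False)
```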
2021-08-04T18:04:25Z
https://github.com/huggingface/datasets/issues/1990
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1990/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1989/comments
https://api.github.com/repos/huggingface/datasets/issues/1989/timeline
2023-07-24T14:39:33Z
null
completed
MDU6SXNzdWU4MjIzMjgxNDc=
closed
[]
null
1,989
{ "avatar_url": "https://avatars.githubusercontent.com/u/17202292?v=4", "events_url": "https://api.github.com/users/ioana-blue/events{/privacy}", "followers_url": "https://api.github.com/users/ioana-blue/followers", "following_url": "https://api.github.com/users/ioana-blue/following{/other_user}", "gists_url": "https://api.github.com/users/ioana-blue/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ioana-blue", "id": 17202292, "login": "ioana-blue", "node_id": "MDQ6VXNlcjE3MjAyMjky", "organizations_url": "https://api.github.com/users/ioana-blue/orgs", "received_events_url": "https://api.github.com/users/ioana-blue/received_events", "repos_url": "https://api.github.com/users/ioana-blue/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ioana-blue/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ioana-blue/subscriptions", "type": "User", "url": "https://api.github.com/users/ioana-blue" }
Question/problem with dataset labels
https://api.github.com/repos/huggingface/datasets/issues/1989/events
null
https://api.github.com/repos/huggingface/datasets/issues/1989/labels{/name}
2021-03-04T17:06:53Z
null
false
null
null
822,328,147
[]
https://api.github.com/repos/huggingface/datasets/issues/1989
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi, I'm using a dataset with two labels "nurse" and "not nurse". For whatever reason (that I don't understand), I get an error that I think comes from the datasets package (using csv). Everything works fine if the labels are "nurse" and "surgeon". This is the trace I get: ``` File "../../../models/tr-4.3.2/run_puppets.py", line 523, in <module> main() File "../../../models/tr-4.3.2/run_puppets.py", line 249, in main datasets = load_dataset("csv", data_files=data_files) File "/dccstor/redrug_ier/envs/last-tr/lib/python3.8/site-packages/datasets/load.py", line 740, in load_dataset builder_instance.download_and_prepare( File "/dccstor/redrug_ier/envs/last-tr/lib/python3.8/site-packages/datasets/builder.py", line 572, in download_and_prepare self._download_and_prepare( File "/dccstor/redrug_ier/envs/last-tr/lib/python3.8/site-packages/datasets/builder.py", line 650, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/dccstor/redrug_ier/envs/last-tr/lib/python3.8/site-packages/datasets/builder.py", line 1028, in _prepare_split writer.write_table(table) File "/dccstor/redrug_ier/envs/last-tr/lib/python3.8/site-packages/datasets/arrow_writer.py", line 292, in write_table pa_table = pa_table.cast(self._schema) File "pyarrow/table.pxi", line 1311, in pyarrow.lib.Table.cast File "pyarrow/table.pxi", line 265, in pyarrow.lib.ChunkedArray.cast File "/dccstor/redrug_ier/envs/last-tr/lib/python3.8/site-packages/pyarrow/compute.py", line 87, in cast return call_function("cast", [arr], options) File "pyarrow/_compute.pyx", line 298, in pyarrow._compute.call_function File "pyarrow/_compute.pyx", line 192, in pyarrow._compute.Function.call File "pyarrow/error.pxi", line 122, in pyarrow.lib.pyarrow_internal_check_status File "pyarrow/error.pxi", line 84, in pyarrow.lib.check_status pyarrow.lib.ArrowInvalid: Failed to parse string: not nurse ``` Any ideas how to fix this? For now, I'll probably make them numeric.
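A possible workaround, assuming the cause is pyarrow trying to infer and cast the label column: declare the schema explicitly when loading the CSV so the label column is typed as a string up front. Column names below are placeholders:

```python
from datasets import Features, Value, load_dataset

# Hypothetical schema: adjust the column names and types to the actual CSV.
features = Features({
    "text": Value("string"),
    "label": Value("string"),  # typed as string so "not nurse" is never cast
})

dataset = load_dataset("csv", data_files={"train": "train.csv"}, features=features)
```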
2023-07-24T14:39:33Z
https://github.com/huggingface/datasets/issues/1989
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1989/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1988/comments
https://api.github.com/repos/huggingface/datasets/issues/1988/timeline
2021-08-04T18:05:23Z
null
completed
MDU6SXNzdWU4MjIzMjQ2MDU=
closed
[]
null
1,988
{ "avatar_url": "https://avatars.githubusercontent.com/u/878399?v=4", "events_url": "https://api.github.com/users/surak/events{/privacy}", "followers_url": "https://api.github.com/users/surak/followers", "following_url": "https://api.github.com/users/surak/following{/other_user}", "gists_url": "https://api.github.com/users/surak/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/surak", "id": 878399, "login": "surak", "node_id": "MDQ6VXNlcjg3ODM5OQ==", "organizations_url": "https://api.github.com/users/surak/orgs", "received_events_url": "https://api.github.com/users/surak/received_events", "repos_url": "https://api.github.com/users/surak/repos", "site_admin": false, "starred_url": "https://api.github.com/users/surak/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/surak/subscriptions", "type": "User", "url": "https://api.github.com/users/surak" }
Readme.md is misleading about kinds of datasets?
https://api.github.com/repos/huggingface/datasets/issues/1988/events
null
https://api.github.com/repos/huggingface/datasets/issues/1988/labels{/name}
2021-03-04T17:04:20Z
null
false
null
null
822,324,605
[]
https://api.github.com/repos/huggingface/datasets/issues/1988
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi! In the README.md, you say: "efficient data pre-processing: simple, fast and reproducible data pre-processing for the above public datasets as well as your own local datasets in CSV/JSON/text." But here: https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py#L82-L117 you mention other kinds of datasets, with images and so on. I'm confused. Is it possible to use it to store, say, ImageNet locally?
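For the local-files part of that README sentence, the supported pattern looks like this (file paths are placeholders for your own data):

```python
from datasets import load_dataset

# Each packaged loader maps a local file format to a Dataset.
csv_ds = load_dataset("csv", data_files={"train": "my_data.csv"})
json_ds = load_dataset("json", data_files={"train": "my_data.json"})
text_ds = load_dataset("text", data_files={"train": "my_data.txt"})
```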
2021-08-04T18:05:23Z
https://github.com/huggingface/datasets/issues/1988
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1988/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1987/comments
https://api.github.com/repos/huggingface/datasets/issues/1987/timeline
2022-10-05T13:12:26Z
null
completed
MDU6SXNzdWU4MjIzMDg5NTY=
closed
[]
null
1,987
{ "avatar_url": "https://avatars.githubusercontent.com/u/10676103?v=4", "events_url": "https://api.github.com/users/stas00/events{/privacy}", "followers_url": "https://api.github.com/users/stas00/followers", "following_url": "https://api.github.com/users/stas00/following{/other_user}", "gists_url": "https://api.github.com/users/stas00/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/stas00", "id": 10676103, "login": "stas00", "node_id": "MDQ6VXNlcjEwNjc2MTAz", "organizations_url": "https://api.github.com/users/stas00/orgs", "received_events_url": "https://api.github.com/users/stas00/received_events", "repos_url": "https://api.github.com/users/stas00/repos", "site_admin": false, "starred_url": "https://api.github.com/users/stas00/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stas00/subscriptions", "type": "User", "url": "https://api.github.com/users/stas00" }
wmt15 is broken
https://api.github.com/repos/huggingface/datasets/issues/1987/events
null
https://api.github.com/repos/huggingface/datasets/issues/1987/labels{/name}
2021-03-04T16:46:25Z
null
false
null
null
822,308,956
[]
https://api.github.com/repos/huggingface/datasets/issues/1987
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
While testing the hotfix, I tried a random other wmt release and found wmt15 to be broken: ``` python -c 'from datasets import load_dataset; load_dataset("wmt15", "de-en")' Downloading: 2.91kB [00:00, 818kB/s] Downloading: 3.02kB [00:00, 897kB/s] Downloading: 41.1kB [00:00, 19.1MB/s] Downloading and preparing dataset wmt15/de-en (download: Unknown size, generated: Unknown size, post-processed: Unknown size, total: Unknown size) to /home/stas/.cache/huggingface/datasets/wmt15/de-en/1.0.0/39ad5f9262a0910a8ad7028ad432731ad23fdf91f2cebbbf2ba4776b9859e87f... Traceback (most recent call last): File "<string>", line 1, in <module> File "/home/stas/anaconda3/envs/main-38/lib/python3.8/site-packages/datasets/load.py", line 740, in load_dataset builder_instance.download_and_prepare( File "/home/stas/anaconda3/envs/main-38/lib/python3.8/site-packages/datasets/builder.py", line 578, in download_and_prepare self._download_and_prepare( File "/home/stas/anaconda3/envs/main-38/lib/python3.8/site-packages/datasets/builder.py", line 634, in _download_and_prepare split_generators = self._split_generators(dl_manager, **split_generators_kwargs) File "/home/stas/.cache/huggingface/modules/datasets_modules/datasets/wmt15/39ad5f9262a0910a8ad7028ad432731ad23fdf91f2cebbbf2ba4776b9859e87f/wmt_utils.py", line 757, in _split_generators downloaded_files = dl_manager.download_and_extract(urls_to_download) File "/home/stas/anaconda3/envs/main-38/lib/python3.8/site-packages/datasets/utils/download_manager.py", line 283, in download_and_extract return self.extract(self.download(url_or_urls)) File "/home/stas/anaconda3/envs/main-38/lib/python3.8/site-packages/datasets/utils/download_manager.py", line 191, in download downloaded_path_or_paths = map_nested( File "/home/stas/anaconda3/envs/main-38/lib/python3.8/site-packages/datasets/utils/py_utils.py", line 203, in map_nested mapped = [ File "/home/stas/anaconda3/envs/main-38/lib/python3.8/site-packages/datasets/utils/py_utils.py", line 204, in <listcomp> _single_map_nested((function, obj, types, None, True)) for obj in tqdm(iterable, disable=disable_tqdm) File "/home/stas/anaconda3/envs/main-38/lib/python3.8/site-packages/datasets/utils/py_utils.py", line 160, in _single_map_nested mapped = [_single_map_nested((function, v, types, None, True)) for v in pbar] File "/home/stas/anaconda3/envs/main-38/lib/python3.8/site-packages/datasets/utils/py_utils.py", line 160, in <listcomp> mapped = [_single_map_nested((function, v, types, None, True)) for v in pbar] File "/home/stas/anaconda3/envs/main-38/lib/python3.8/site-packages/datasets/utils/py_utils.py", line 142, in _single_map_nested return function(data_struct) File "/home/stas/anaconda3/envs/main-38/lib/python3.8/site-packages/datasets/utils/download_manager.py", line 214, in _download return cached_path(url_or_filename, download_config=download_config) File "/home/stas/anaconda3/envs/main-38/lib/python3.8/site-packages/datasets/utils/file_utils.py", line 274, in cached_path output_path = get_from_cache( File "/home/stas/anaconda3/envs/main-38/lib/python3.8/site-packages/datasets/utils/file_utils.py", line 614, in get_from_cache raise FileNotFoundError("Couldn't find file at {}".format(url)) FileNotFoundError: Couldn't find file at https://huggingface.co/datasets/wmt/wmt15/resolve/main/training-parallel-nc-v10.tgz ```
2022-10-05T13:12:26Z
https://github.com/huggingface/datasets/issues/1987
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1987/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1986/comments
https://api.github.com/repos/huggingface/datasets/issues/1986/timeline
2021-03-04T14:31:07Z
null
completed
MDU6SXNzdWU4MjIxNzYyOTA=
closed
[]
null
1,986
{ "avatar_url": "https://avatars.githubusercontent.com/u/32322564?v=4", "events_url": "https://api.github.com/users/sabania/events{/privacy}", "followers_url": "https://api.github.com/users/sabania/followers", "following_url": "https://api.github.com/users/sabania/following{/other_user}", "gists_url": "https://api.github.com/users/sabania/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/sabania", "id": 32322564, "login": "sabania", "node_id": "MDQ6VXNlcjMyMzIyNTY0", "organizations_url": "https://api.github.com/users/sabania/orgs", "received_events_url": "https://api.github.com/users/sabania/received_events", "repos_url": "https://api.github.com/users/sabania/repos", "site_admin": false, "starred_url": "https://api.github.com/users/sabania/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sabania/subscriptions", "type": "User", "url": "https://api.github.com/users/sabania" }
wmt datasets fail to load
https://api.github.com/repos/huggingface/datasets/issues/1986/events
null
https://api.github.com/repos/huggingface/datasets/issues/1986/labels{/name}
2021-03-04T14:18:55Z
null
false
null
null
822,176,290
[]
https://api.github.com/repos/huggingface/datasets/issues/1986
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
~\.cache\huggingface\modules\datasets_modules\datasets\wmt14\43e717d978d2261502b0194999583acb874ba73b0f4aed0ada2889d1bb00f36e\wmt_utils.py in _split_generators(self, dl_manager) 758 # Extract manually downloaded files. 759 manual_files = dl_manager.extract(manual_paths_dict) --> 760 extraction_map = dict(downloaded_files, **manual_files) 761 762 for language in self.config.language_pair: TypeError: type object argument after ** must be a mapping, not list
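The failing line is plain-Python behavior: `dict(a, **b)` requires `b` to be a mapping, so a list coming back from `dl_manager.extract` triggers exactly this error. A minimal reproduction, independent of `datasets`:

```python
downloaded_files = {"train": "/tmp/train.tgz"}

# ** unpacking accepts any mapping, so an empty dict merges fine...
merged = dict(downloaded_files, **{})

# ...but a list is not a mapping, which reproduces the error above.
try:
    dict(downloaded_files, **[])
except TypeError as err:
    print(err)  # "... argument after ** must be a mapping, not list"
```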
2021-03-04T14:31:07Z
https://github.com/huggingface/datasets/issues/1986
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1986/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1985/comments
https://api.github.com/repos/huggingface/datasets/issues/1985/timeline
2021-03-16T09:44:00Z
null
null
MDExOlB1bGxSZXF1ZXN0NTg0ODM4NjIw
closed
[]
false
1,985
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
Optimize int precision
https://api.github.com/repos/huggingface/datasets/issues/1985/events
null
https://api.github.com/repos/huggingface/datasets/issues/1985/labels{/name}
2021-03-04T14:12:23Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1985.diff", "html_url": "https://github.com/huggingface/datasets/pull/1985", "merged_at": "2021-03-16T09:44:00Z", "patch_url": "https://github.com/huggingface/datasets/pull/1985.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1985" }
822,170,651
[]
https://api.github.com/repos/huggingface/datasets/issues/1985
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
Optimize int precision to reduce dataset file size. Close #1973, close #1825, close #861.
2021-03-22T12:04:40Z
https://github.com/huggingface/datasets/pull/1985
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 3, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 3, "url": "https://api.github.com/repos/huggingface/datasets/issues/1985/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1984/comments
https://api.github.com/repos/huggingface/datasets/issues/1984/timeline
2022-11-04T14:19:16Z
null
completed
MDU6SXNzdWU4MjE4MTY1ODg=
closed
[]
null
1,984
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
Add tests for WMT datasets
https://api.github.com/repos/huggingface/datasets/issues/1984/events
null
https://api.github.com/repos/huggingface/datasets/issues/1984/labels{/name}
2021-03-04T06:46:42Z
null
false
null
null
821,816,588
[]
https://api.github.com/repos/huggingface/datasets/issues/1984
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
As requested in #1981, we need tests for WMT datasets, using dummy data.
2022-11-04T14:19:16Z
https://github.com/huggingface/datasets/issues/1984
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1984/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1983/comments
https://api.github.com/repos/huggingface/datasets/issues/1983/timeline
2022-10-05T13:13:26Z
null
completed
MDU6SXNzdWU4MjE3NDYwMDg=
closed
[]
null
1,983
{ "avatar_url": "https://avatars.githubusercontent.com/u/39556019?v=4", "events_url": "https://api.github.com/users/h-peng17/events{/privacy}", "followers_url": "https://api.github.com/users/h-peng17/followers", "following_url": "https://api.github.com/users/h-peng17/following{/other_user}", "gists_url": "https://api.github.com/users/h-peng17/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/h-peng17", "id": 39556019, "login": "h-peng17", "node_id": "MDQ6VXNlcjM5NTU2MDE5", "organizations_url": "https://api.github.com/users/h-peng17/orgs", "received_events_url": "https://api.github.com/users/h-peng17/received_events", "repos_url": "https://api.github.com/users/h-peng17/repos", "site_admin": false, "starred_url": "https://api.github.com/users/h-peng17/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/h-peng17/subscriptions", "type": "User", "url": "https://api.github.com/users/h-peng17" }
The size of CoNLL-2003 is not consistent with the official release.
https://api.github.com/repos/huggingface/datasets/issues/1983/events
null
https://api.github.com/repos/huggingface/datasets/issues/1983/labels{/name}
2021-03-04T04:41:34Z
null
false
null
null
821,746,008
[]
https://api.github.com/repos/huggingface/datasets/issues/1983
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Thanks for sharing the dataset! But when I use conll-2003, I have some questions. The statistics of conll-2003 in this repo are: \#train 14041, \#dev 3250, \#test 3453, while the official statistics are: \#train 14987, \#dev 3466, \#test 3684. Looking forward to your reply~
2022-10-05T13:13:26Z
https://github.com/huggingface/datasets/issues/1983
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1983/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1982/comments
https://api.github.com/repos/huggingface/datasets/issues/1982/timeline
2021-03-03T22:48:36Z
null
null
MDExOlB1bGxSZXF1ZXN0NTg0MjM2NzQ0
closed
[]
false
1,982
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
Fix NestedDataStructure.data for empty dict
https://api.github.com/repos/huggingface/datasets/issues/1982/events
null
https://api.github.com/repos/huggingface/datasets/issues/1982/labels{/name}
2021-03-03T20:16:51Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1982.diff", "html_url": "https://github.com/huggingface/datasets/pull/1982", "merged_at": "2021-03-03T22:48:36Z", "patch_url": "https://github.com/huggingface/datasets/pull/1982.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1982" }
821,448,791
[]
https://api.github.com/repos/huggingface/datasets/issues/1982
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
Fix #1981
2021-03-04T16:46:04Z
https://github.com/huggingface/datasets/pull/1982
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1982/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1981/comments
https://api.github.com/repos/huggingface/datasets/issues/1981/timeline
2021-03-03T22:48:36Z
null
completed
MDU6SXNzdWU4MjE0MTExMDk=
closed
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
1,981
{ "avatar_url": "https://avatars.githubusercontent.com/u/10676103?v=4", "events_url": "https://api.github.com/users/stas00/events{/privacy}", "followers_url": "https://api.github.com/users/stas00/followers", "following_url": "https://api.github.com/users/stas00/following{/other_user}", "gists_url": "https://api.github.com/users/stas00/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/stas00", "id": 10676103, "login": "stas00", "node_id": "MDQ6VXNlcjEwNjc2MTAz", "organizations_url": "https://api.github.com/users/stas00/orgs", "received_events_url": "https://api.github.com/users/stas00/received_events", "repos_url": "https://api.github.com/users/stas00/repos", "site_admin": false, "starred_url": "https://api.github.com/users/stas00/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stas00/subscriptions", "type": "User", "url": "https://api.github.com/users/stas00" }
wmt datasets fail to load
https://api.github.com/repos/huggingface/datasets/issues/1981/events
null
https://api.github.com/repos/huggingface/datasets/issues/1981/labels{/name}
2021-03-03T19:21:39Z
null
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
null
821,411,109
[]
https://api.github.com/repos/huggingface/datasets/issues/1981
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
on master: ``` python -c 'from datasets import load_dataset; load_dataset("wmt14", "de-en")' Downloading and preparing dataset wmt14/de-en (download: Unknown size, generated: Unknown size, post-processed: Unknown size, total: Unknown size) to /home/stas/.cache/huggingface/datasets/wmt14/de-en/1.0.0/43e717d978d2261502b0194999583acb874ba73b0f4aed0ada2889d1bb00f36e... Traceback (most recent call last): File "<string>", line 1, in <module> File "/mnt/nvme1/code/huggingface/datasets-master/src/datasets/load.py", line 740, in load_dataset builder_instance.download_and_prepare( File "/mnt/nvme1/code/huggingface/datasets-master/src/datasets/builder.py", line 578, in download_and_prepare self._download_and_prepare( File "/mnt/nvme1/code/huggingface/datasets-master/src/datasets/builder.py", line 634, in _download_and_prepare split_generators = self._split_generators(dl_manager, **split_generators_kwargs) File "/home/stas/.cache/huggingface/modules/datasets_modules/datasets/wmt14/43e717d978d2261502b0194999583acb874ba73b0f4aed0ada2889d1bb00f36e/wmt_utils.py", line 760, in _split_generators extraction_map = dict(downloaded_files, **manual_files) ``` it worked fine recently. same problem if I try wmt16. git bisect points to this commit from Feb 25 as the culprit https://github.com/huggingface/datasets/commit/792f1d9bb1c5361908f73e2ef7f0181b2be409fa @albertvillanova
2021-03-04T14:16:47Z
https://github.com/huggingface/datasets/issues/1981
{ "+1": 0, "-1": 0, "confused": 1, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1981/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1980/comments
https://api.github.com/repos/huggingface/datasets/issues/1980/timeline
2021-03-15T11:27:26Z
null
null
MDExOlB1bGxSZXF1ZXN0NTg0MTI1OTUy
closed
[]
false
1,980
{ "avatar_url": "https://avatars.githubusercontent.com/u/25499439?v=4", "events_url": "https://api.github.com/users/KaijuML/events{/privacy}", "followers_url": "https://api.github.com/users/KaijuML/followers", "following_url": "https://api.github.com/users/KaijuML/following{/other_user}", "gists_url": "https://api.github.com/users/KaijuML/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/KaijuML", "id": 25499439, "login": "KaijuML", "node_id": "MDQ6VXNlcjI1NDk5NDM5", "organizations_url": "https://api.github.com/users/KaijuML/orgs", "received_events_url": "https://api.github.com/users/KaijuML/received_events", "repos_url": "https://api.github.com/users/KaijuML/repos", "site_admin": false, "starred_url": "https://api.github.com/users/KaijuML/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/KaijuML/subscriptions", "type": "User", "url": "https://api.github.com/users/KaijuML" }
Loading all answers from drop
https://api.github.com/repos/huggingface/datasets/issues/1980/events
null
https://api.github.com/repos/huggingface/datasets/issues/1980/labels{/name}
2021-03-03T17:13:07Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1980.diff", "html_url": "https://github.com/huggingface/datasets/pull/1980", "merged_at": "2021-03-15T11:27:26Z", "patch_url": "https://github.com/huggingface/datasets/pull/1980.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1980" }
821,312,810
[]
https://api.github.com/repos/huggingface/datasets/issues/1980
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Hello all, I propose this change to the DROP loading script so that all answers are loaded no matter their type. Currently, only "span" answers are loaded, which excludes a significant number of answers from DROP (i.e. "number" and "date"). I updated the script with the version I use for my work. However, I couldn't find a way to verify that everything works when integrated with the datasets repo, since the `load_dataset` method seems to always download the script from GitHub and not from local files. Note that 9 items from the train set have no answers, as well as 1 from the validation set. The script I propose simply does not load them; see the sketch below. Let me know if there is anything else I can do, Clément
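A hedged sketch of the parsing logic being described (field names follow the public DROP JSON layout; this is an illustration, not the exact patched script):

```python
def parse_answer(answer):
    """Return the text form(s) of one DROP answer, whatever its type.

    `answer` is one entry from the DROP JSON, which uses the keys
    "spans", "number" and "date"; this helper is purely illustrative.
    """
    if answer.get("spans"):  # "span" answers: already a list of strings
        return answer["spans"]
    if answer.get("number"):  # "number" answers: stored as a single value
        return [str(answer["number"])]
    date = answer.get("date", {})
    if any(date.get(k) for k in ("day", "month", "year")):  # "date" answers
        return [" ".join(date.get(k, "") for k in ("day", "month", "year")).strip()]
    return []  # items with no answer of any type are simply skipped
```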
2021-03-15T11:27:26Z
https://github.com/huggingface/datasets/pull/1980
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1980/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1979/comments
https://api.github.com/repos/huggingface/datasets/issues/1979/timeline
2021-03-12T13:10:50Z
null
null
MDExOlB1bGxSZXF1ZXN0NTgzODQ3MTk3
closed
[]
false
1,979
{ "avatar_url": "https://avatars.githubusercontent.com/u/8195444?v=4", "events_url": "https://api.github.com/users/hemildesai/events{/privacy}", "followers_url": "https://api.github.com/users/hemildesai/followers", "following_url": "https://api.github.com/users/hemildesai/following{/other_user}", "gists_url": "https://api.github.com/users/hemildesai/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/hemildesai", "id": 8195444, "login": "hemildesai", "node_id": "MDQ6VXNlcjgxOTU0NDQ=", "organizations_url": "https://api.github.com/users/hemildesai/orgs", "received_events_url": "https://api.github.com/users/hemildesai/received_events", "repos_url": "https://api.github.com/users/hemildesai/repos", "site_admin": false, "starred_url": "https://api.github.com/users/hemildesai/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hemildesai/subscriptions", "type": "User", "url": "https://api.github.com/users/hemildesai" }
Add article_id and process test set template for semeval 2020 task 11…
https://api.github.com/repos/huggingface/datasets/issues/1979/events
null
https://api.github.com/repos/huggingface/datasets/issues/1979/labels{/name}
2021-03-03T10:34:32Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1979.diff", "html_url": "https://github.com/huggingface/datasets/pull/1979", "merged_at": "2021-03-12T13:10:50Z", "patch_url": "https://github.com/huggingface/datasets/pull/1979.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1979" }
820,977,853
[]
https://api.github.com/repos/huggingface/datasets/issues/1979
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
… dataset - `article_id` is needed to create the submission file for the task at https://propaganda.qcri.org/semeval2020-task11/ - The `technique classification` task provides the span indices in a template for the test set that is necessary to complete the task. This PR implements processing of that template for the dataset.
2021-03-13T10:59:40Z
https://github.com/huggingface/datasets/pull/1979
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1979/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1978/comments
https://api.github.com/repos/huggingface/datasets/issues/1978/timeline
2021-03-05T09:33:55Z
null
null
MDExOlB1bGxSZXF1ZXN0NTgzODI5Njgz
closed
[]
false
1,978
{ "avatar_url": "https://avatars.githubusercontent.com/u/36982089?v=4", "events_url": "https://api.github.com/users/lorinczb/events{/privacy}", "followers_url": "https://api.github.com/users/lorinczb/followers", "following_url": "https://api.github.com/users/lorinczb/following{/other_user}", "gists_url": "https://api.github.com/users/lorinczb/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lorinczb", "id": 36982089, "login": "lorinczb", "node_id": "MDQ6VXNlcjM2OTgyMDg5", "organizations_url": "https://api.github.com/users/lorinczb/orgs", "received_events_url": "https://api.github.com/users/lorinczb/received_events", "repos_url": "https://api.github.com/users/lorinczb/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lorinczb/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lorinczb/subscriptions", "type": "User", "url": "https://api.github.com/users/lorinczb" }
Adding ro sts dataset
https://api.github.com/repos/huggingface/datasets/issues/1978/events
null
https://api.github.com/repos/huggingface/datasets/issues/1978/labels{/name}
2021-03-03T10:08:53Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1978.diff", "html_url": "https://github.com/huggingface/datasets/pull/1978", "merged_at": "2021-03-05T09:33:55Z", "patch_url": "https://github.com/huggingface/datasets/pull/1978.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1978" }
820,956,806
[]
https://api.github.com/repos/huggingface/datasets/issues/1978
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Adding [RO-STS](https://github.com/dumitrescustefan/RO-STS) dataset
2021-03-05T10:00:14Z
https://github.com/huggingface/datasets/pull/1978
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1978/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1977/comments
https://api.github.com/repos/huggingface/datasets/issues/1977/timeline
null
null
null
MDU6SXNzdWU4MjAzMTIwMjI=
open
[]
null
1,977
{ "avatar_url": "https://avatars.githubusercontent.com/u/79165106?v=4", "events_url": "https://api.github.com/users/dorost1234/events{/privacy}", "followers_url": "https://api.github.com/users/dorost1234/followers", "following_url": "https://api.github.com/users/dorost1234/following{/other_user}", "gists_url": "https://api.github.com/users/dorost1234/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dorost1234", "id": 79165106, "login": "dorost1234", "node_id": "MDQ6VXNlcjc5MTY1MTA2", "organizations_url": "https://api.github.com/users/dorost1234/orgs", "received_events_url": "https://api.github.com/users/dorost1234/received_events", "repos_url": "https://api.github.com/users/dorost1234/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dorost1234/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dorost1234/subscriptions", "type": "User", "url": "https://api.github.com/users/dorost1234" }
ModuleNotFoundError: No module named 'apache_beam' for wikipedia datasets
https://api.github.com/repos/huggingface/datasets/issues/1977/events
null
https://api.github.com/repos/huggingface/datasets/issues/1977/labels{/name}
2021-03-02T19:21:28Z
null
false
null
null
820,312,022
[]
https://api.github.com/repos/huggingface/datasets/issues/1977
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi, I am trying to run the run_mlm.py code [1] from huggingface with the "wikipedia"/"20200501.aa" dataset: `python run_mlm.py --model_name_or_path bert-base-multilingual-cased --dataset_name wikipedia --dataset_config_name 20200501.aa --do_train --do_eval --output_dir /tmp/test-mlm --max_seq_length 256 ` I am getting the error below, but as per the documentation, the huggingface datasets library provides a processed version of this dataset that users can load without setting up extra settings for apache-beam. Could you please help me load this dataset? Do you think I can run run_mlm.py with it, or is there a way I could subsample and train the model? I would greatly appreciate a processed version of all languages for this dataset, so users can load them without setting up apache-beam. Thanks, I really appreciate your help. @lhoestq [1] https://github.com/huggingface/transformers/blob/master/examples/language-modeling/run_mlm.py The error I get: ``` >>> import datasets >>> datasets.load_dataset("wikipedia", "20200501.aa") Downloading and preparing dataset wikipedia/20200501.aa (download: Unknown size, generated: Unknown size, post-processed: Unknown size, total: Unknown size) to /dara/temp/cache_home_2/datasets/wikipedia/20200501.aa/1.0.0/4021357e28509391eab2f8300d9b689e7e8f3a877ebb3d354b01577d497ebc63... Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/dara/temp/libs/anaconda3/envs/codes/lib/python3.7/site-packages/datasets-1.3.0-py3.7.egg/datasets/load.py", line 746, in load_dataset use_auth_token=use_auth_token, File "/dara/temp/libs/anaconda3/envs/codes/lib/python3.7/site-packages/datasets-1.3.0-py3.7.egg/datasets/builder.py", line 573, in download_and_prepare dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/dara/temp/libs/anaconda3/envs/codes/lib/python3.7/site-packages/datasets-1.3.0-py3.7.egg/datasets/builder.py", line 1099, in _download_and_prepare import apache_beam as beam ModuleNotFoundError: No module named 'apache_beam' ```
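For reference, a sketch of the usual workaround until preprocessed versions are hosted for every language: install the Beam dependencies and run the pipeline locally with the direct runner. This is feasible for a small language like "20200501.aa", but slow for large dumps:

```python
# Requires the Beam extras first: pip install apache-beam mwparserfromhell
from datasets import load_dataset

# beam_runner="DirectRunner" processes the dump on the local machine.
wiki = load_dataset("wikipedia", "20200501.aa", beam_runner="DirectRunner")
print(wiki["train"][0]["title"])
```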
2021-03-03T10:17:40Z
https://github.com/huggingface/datasets/issues/1977
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1977/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1976/comments
https://api.github.com/repos/huggingface/datasets/issues/1976/timeline
2021-03-03T15:45:30Z
null
null
MDExOlB1bGxSZXF1ZXN0NTgzMjA3NDI4
closed
[]
false
1,976
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Add datasets full offline mode with HF_DATASETS_OFFLINE
https://api.github.com/repos/huggingface/datasets/issues/1976/events
null
https://api.github.com/repos/huggingface/datasets/issues/1976/labels{/name}
2021-03-02T17:26:59Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1976.diff", "html_url": "https://github.com/huggingface/datasets/pull/1976", "merged_at": "2021-03-03T15:45:30Z", "patch_url": "https://github.com/huggingface/datasets/pull/1976.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1976" }
820,228,538
[]
https://api.github.com/repos/huggingface/datasets/issues/1976
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
Add the HF_DATASETS_OFFLINE environment variable for users who want to use `datasets` offline without having to wait for the network timeouts/retries to happen. This was requested in https://github.com/huggingface/datasets/issues/1939 cc @stas00
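A usage sketch of the new flag; it must be set before `datasets` is imported for it to take effect:

```python
import os

# Tell datasets to never touch the network and fail fast instead.
os.environ["HF_DATASETS_OFFLINE"] = "1"

from datasets import load_dataset

# Resolves only from the local cache; raises immediately if the files are absent.
ds = load_dataset("squad", split="validation")
```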
2021-03-03T15:45:31Z
https://github.com/huggingface/datasets/pull/1976
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1976/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1975/comments
https://api.github.com/repos/huggingface/datasets/issues/1975/timeline
2021-03-04T10:43:22Z
null
null
MDExOlB1bGxSZXF1ZXN0NTgzMTg4NjM3
closed
[]
false
1,975
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
Fix flake8
https://api.github.com/repos/huggingface/datasets/issues/1975/events
null
https://api.github.com/repos/huggingface/datasets/issues/1975/labels{/name}
2021-03-02T16:59:13Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1975.diff", "html_url": "https://github.com/huggingface/datasets/pull/1975", "merged_at": "2021-03-04T10:43:22Z", "patch_url": "https://github.com/huggingface/datasets/pull/1975.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1975" }
820,205,485
[]
https://api.github.com/repos/huggingface/datasets/issues/1975
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
Fix flake8 style.
2021-03-04T10:43:22Z
https://github.com/huggingface/datasets/pull/1975
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1975/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1974/comments
https://api.github.com/repos/huggingface/datasets/issues/1974/timeline
2021-03-04T10:42:48Z
null
null
MDExOlB1bGxSZXF1ZXN0NTgzMTE5MDI0
closed
[]
false
1,974
{ "avatar_url": "https://avatars.githubusercontent.com/u/32727188?v=4", "events_url": "https://api.github.com/users/ydcjeff/events{/privacy}", "followers_url": "https://api.github.com/users/ydcjeff/followers", "following_url": "https://api.github.com/users/ydcjeff/following{/other_user}", "gists_url": "https://api.github.com/users/ydcjeff/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ydcjeff", "id": 32727188, "login": "ydcjeff", "node_id": "MDQ6VXNlcjMyNzI3MTg4", "organizations_url": "https://api.github.com/users/ydcjeff/orgs", "received_events_url": "https://api.github.com/users/ydcjeff/received_events", "repos_url": "https://api.github.com/users/ydcjeff/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ydcjeff/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ydcjeff/subscriptions", "type": "User", "url": "https://api.github.com/users/ydcjeff" }
feat(docs): navigate with left/right arrow keys
https://api.github.com/repos/huggingface/datasets/issues/1974/events
null
https://api.github.com/repos/huggingface/datasets/issues/1974/labels{/name}
2021-03-02T15:24:50Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1974.diff", "html_url": "https://github.com/huggingface/datasets/pull/1974", "merged_at": "2021-03-04T10:42:48Z", "patch_url": "https://github.com/huggingface/datasets/pull/1974.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1974" }
820,122,223
[]
https://api.github.com/repos/huggingface/datasets/issues/1974
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Enables docs navigation with the left/right arrow keys. It can be useful for those who navigate with the keyboard a lot. More info: https://github.com/sphinx-doc/sphinx/pull/2064 You can try it here: https://29353-250213286-gh.circle-artifacts.com/0/docs/_build/html/index.html
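For context, a sketch of how keyboard navigation is typically switched on in a Sphinx `conf.py`; the `navigation_with_keys` option name comes from sphinx_rtd_theme and is an assumption about the usual mechanism, not necessarily what this PR changes:

```python
# conf.py (sketch, assuming sphinx_rtd_theme)
html_theme = "sphinx_rtd_theme"
html_theme_options = {
    "navigation_with_keys": True,  # left/right arrows jump to prev/next page
}
```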
2021-03-04T10:44:12Z
https://github.com/huggingface/datasets/pull/1974
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1974/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1973/comments
https://api.github.com/repos/huggingface/datasets/issues/1973/timeline
2021-03-16T09:44:00Z
null
completed
MDU6SXNzdWU4MjAwNzczMTI=
closed
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
1,973
{ "avatar_url": "https://avatars.githubusercontent.com/u/17202292?v=4", "events_url": "https://api.github.com/users/ioana-blue/events{/privacy}", "followers_url": "https://api.github.com/users/ioana-blue/followers", "following_url": "https://api.github.com/users/ioana-blue/following{/other_user}", "gists_url": "https://api.github.com/users/ioana-blue/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ioana-blue", "id": 17202292, "login": "ioana-blue", "node_id": "MDQ6VXNlcjE3MjAyMjky", "organizations_url": "https://api.github.com/users/ioana-blue/orgs", "received_events_url": "https://api.github.com/users/ioana-blue/received_events", "repos_url": "https://api.github.com/users/ioana-blue/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ioana-blue/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ioana-blue/subscriptions", "type": "User", "url": "https://api.github.com/users/ioana-blue" }
Question: what gets stored in the datasets cache and why is it so huge?
https://api.github.com/repos/huggingface/datasets/issues/1973/events
null
https://api.github.com/repos/huggingface/datasets/issues/1973/labels{/name}
2021-03-02T14:35:53Z
null
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
null
820,077,312
[]
https://api.github.com/repos/huggingface/datasets/issues/1973
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
I'm running several training jobs (around 10) with a relatively large dataset (3M samples). The datasets cache reached 178G, which seems really large. What is stored in there and why is it so large? I don't think I noticed this problem before; it seems to be related to the new version of the datasets library. Any insight? Thank you!
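For anyone hitting the same question, a sketch of the two calls commonly used to inspect and reclaim that space (every `map`/`filter` with a new fingerprint writes a new Arrow file into the cache):

```python
from datasets import load_dataset

ds = load_dataset("imdb", split="train")

print(ds.cache_files)               # where the Arrow cache files live
removed = ds.cleanup_cache_files()  # deletes cache files no longer used by ds
print(f"removed {removed} stale cache file(s)")
```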
2021-03-30T14:03:59Z
https://github.com/huggingface/datasets/issues/1973
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1973/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1972/comments
https://api.github.com/repos/huggingface/datasets/issues/1972/timeline
2022-06-01T16:08:47Z
null
completed
MDU6SXNzdWU4MTk3NTI3NjE=
closed
[]
null
1,972
{ "avatar_url": "https://avatars.githubusercontent.com/u/23195502?v=4", "events_url": "https://api.github.com/users/farooqzaman1/events{/privacy}", "followers_url": "https://api.github.com/users/farooqzaman1/followers", "following_url": "https://api.github.com/users/farooqzaman1/following{/other_user}", "gists_url": "https://api.github.com/users/farooqzaman1/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/farooqzaman1", "id": 23195502, "login": "farooqzaman1", "node_id": "MDQ6VXNlcjIzMTk1NTAy", "organizations_url": "https://api.github.com/users/farooqzaman1/orgs", "received_events_url": "https://api.github.com/users/farooqzaman1/received_events", "repos_url": "https://api.github.com/users/farooqzaman1/repos", "site_admin": false, "starred_url": "https://api.github.com/users/farooqzaman1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/farooqzaman1/subscriptions", "type": "User", "url": "https://api.github.com/users/farooqzaman1" }
'Dataset' object has no attribute 'rename_column'
https://api.github.com/repos/huggingface/datasets/issues/1972/events
null
https://api.github.com/repos/huggingface/datasets/issues/1972/labels{/name}
2021-03-02T08:01:49Z
null
false
null
null
819,752,761
[]
https://api.github.com/repos/huggingface/datasets/issues/1972
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
'Dataset' object has no attribute 'rename_column'
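`rename_column` only exists in more recent releases (older ones exposed the in-place `rename_column_` instead), so upgrading usually resolves this. A sketch assuming an up-to-date install:

```python
from datasets import load_dataset

ds = load_dataset("imdb", split="train")
ds = ds.rename_column("label", "labels")  # returns a new Dataset
print(ds.column_names)
```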
2022-06-01T16:08:47Z
https://github.com/huggingface/datasets/issues/1972
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1972/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1971/comments
https://api.github.com/repos/huggingface/datasets/issues/1971/timeline
2021-03-10T16:36:57Z
null
null
MDExOlB1bGxSZXF1ZXN0NTgyNzgyNTU0
closed
[]
false
1,971
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
Fix ArrowWriter closes stream at exit
https://api.github.com/repos/huggingface/datasets/issues/1971/events
null
https://api.github.com/repos/huggingface/datasets/issues/1971/labels{/name}
2021-03-02T07:12:34Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1971.diff", "html_url": "https://github.com/huggingface/datasets/pull/1971", "merged_at": "2021-03-10T16:36:56Z", "patch_url": "https://github.com/huggingface/datasets/pull/1971.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1971" }
819,714,231
[]
https://api.github.com/repos/huggingface/datasets/issues/1971
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
The current implementation of ArrowWriter does not properly release its `stream` resource (by closing it) if its `finalize()` method is not called, or if an exception is raised before or during the call to `finalize()`. Therefore, ArrowWriter should be used as a context manager that properly closes its `stream` resource at exit.
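A sketch of the intended usage after this change, with ArrowWriter as a context manager; this is an internal API, shown for illustration only:

```python
from datasets.arrow_writer import ArrowWriter

# The stream is now closed at exit even if an exception interrupts finalize().
with ArrowWriter(path="data.arrow") as writer:
    writer.write_batch({"text": ["hello", "world"]})
    num_examples, num_bytes = writer.finalize()
```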
2021-03-10T16:36:57Z
https://github.com/huggingface/datasets/pull/1971
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1971/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1970/comments
https://api.github.com/repos/huggingface/datasets/issues/1970/timeline
2021-03-02T02:01:33Z
null
null
MDExOlB1bGxSZXF1ZXN0NTgyNjAzMzEw
closed
[]
false
1,970
{ "avatar_url": "https://avatars.githubusercontent.com/u/10469459?v=4", "events_url": "https://api.github.com/users/yjernite/events{/privacy}", "followers_url": "https://api.github.com/users/yjernite/followers", "following_url": "https://api.github.com/users/yjernite/following{/other_user}", "gists_url": "https://api.github.com/users/yjernite/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yjernite", "id": 10469459, "login": "yjernite", "node_id": "MDQ6VXNlcjEwNDY5NDU5", "organizations_url": "https://api.github.com/users/yjernite/orgs", "received_events_url": "https://api.github.com/users/yjernite/received_events", "repos_url": "https://api.github.com/users/yjernite/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yjernite/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yjernite/subscriptions", "type": "User", "url": "https://api.github.com/users/yjernite" }
Fixing the URL filtering for bad MLSUM examples in GEM
https://api.github.com/repos/huggingface/datasets/issues/1970/events
null
https://api.github.com/repos/huggingface/datasets/issues/1970/labels{/name}
2021-03-02T01:22:58Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1970.diff", "html_url": "https://github.com/huggingface/datasets/pull/1970", "merged_at": "2021-03-02T02:01:33Z", "patch_url": "https://github.com/huggingface/datasets/pull/1970.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1970" }
819,500,620
[]
https://api.github.com/repos/huggingface/datasets/issues/1970
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
This updates the code and metadata to use the updated `gem_mlsum_bad_ids_fixed.json` file provided by @juand-r cc @sebastianGehrmann
2021-03-02T03:19:06Z
https://github.com/huggingface/datasets/pull/1970
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1970/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1967/comments
https://api.github.com/repos/huggingface/datasets/issues/1967/timeline
2021-03-02T17:25:00Z
null
null
MDExOlB1bGxSZXF1ZXN0NTgyMjc5OTEx
closed
[]
false
1,967
{ "avatar_url": "https://avatars.githubusercontent.com/u/5150963?v=4", "events_url": "https://api.github.com/users/yavuzKomecoglu/events{/privacy}", "followers_url": "https://api.github.com/users/yavuzKomecoglu/followers", "following_url": "https://api.github.com/users/yavuzKomecoglu/following{/other_user}", "gists_url": "https://api.github.com/users/yavuzKomecoglu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yavuzKomecoglu", "id": 5150963, "login": "yavuzKomecoglu", "node_id": "MDQ6VXNlcjUxNTA5NjM=", "organizations_url": "https://api.github.com/users/yavuzKomecoglu/orgs", "received_events_url": "https://api.github.com/users/yavuzKomecoglu/received_events", "repos_url": "https://api.github.com/users/yavuzKomecoglu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yavuzKomecoglu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yavuzKomecoglu/subscriptions", "type": "User", "url": "https://api.github.com/users/yavuzKomecoglu" }
Add Turkish News Category Dataset - 270K - Lite Version
https://api.github.com/repos/huggingface/datasets/issues/1967/events
null
https://api.github.com/repos/huggingface/datasets/issues/1967/labels{/name}
2021-03-01T18:21:59Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1967.diff", "html_url": "https://github.com/huggingface/datasets/pull/1967", "merged_at": "2021-03-02T17:25:00Z", "patch_url": "https://github.com/huggingface/datasets/pull/1967.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1967" }
819,129,568
[]
https://api.github.com/repos/huggingface/datasets/issues/1967
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
This PR adds the Turkish News Category Dataset (270K - Lite Version), a text classification dataset by me, @basakbuluz and @serdarakyol. It contains the same news as the current [interpress_news_category_tr dataset](https://huggingface.co/datasets/interpress_news_category_tr) but with less extraneous information and fewer OCR errors; it can be easily parsed, and the news items were rearranged into 10 classes ("kültürsanat", "ekonomi", "siyaset", "eğitim", "dünya", "spor", "teknoloji", "magazin", "sağlık", "gündem").
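Once merged, loading should look roughly like this; the identifier is an assumption based on this PR, so check the Hub for the final name:

```python
from datasets import load_dataset

ds = load_dataset("interpress_news_category_tr_lite", split="train")
print(ds.features)  # expect a 10-class label feature per the description above
```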
2021-03-02T17:25:00Z
https://github.com/huggingface/datasets/pull/1967
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1967/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1966/comments
https://api.github.com/repos/huggingface/datasets/issues/1966/timeline
2021-03-02T13:05:44Z
null
null
MDExOlB1bGxSZXF1ZXN0NTgyMjU2MzE0
closed
[]
false
1,966
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Fix metrics collision in separate multiprocessed experiments
https://api.github.com/repos/huggingface/datasets/issues/1966/events
null
https://api.github.com/repos/huggingface/datasets/issues/1966/labels{/name}
2021-03-01T17:45:18Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1966.diff", "html_url": "https://github.com/huggingface/datasets/pull/1966", "merged_at": "2021-03-02T13:05:44Z", "patch_url": "https://github.com/huggingface/datasets/pull/1966.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1966" }
819,101,253
[]
https://api.github.com/repos/huggingface/datasets/issues/1966
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
As noticed in #1942, there's an issue with locks if you run multiple separate evaluation experiments in a multiprocessed setup. Indeed, there is a time span in Metric._finalize() where process 0 loses its lock before re-acquiring it. This is bad since the lock of process 0 tells the other processes that the corresponding cache file is available for writing/reading/deleting: we end up with one metric cache colliding with another one. This can raise FileNotFound errors when a metric tries to read the cache file after a second, conflicting metric has deleted it. To fix that, I made sure that the lock file of process 0 stays acquired from the cache file creation to the end of the metric computation. This way the other metrics can simply sample a new hashing name in order to avoid the collision. Finally, I added missing tests for separate experiments in a distributed setup.
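For context, a sketch of the distributed setup this fix protects, with one metric instance per process:

```python
from datasets import load_metric

def evaluate(rank, world_size, preds, refs):
    # Each process adds its own shard of predictions; rank 0 aggregates.
    metric = load_metric("glue", "mrpc", num_process=world_size, process_id=rank)
    metric.add_batch(predictions=preds, references=refs)
    return metric.compute()  # returns None on every rank except 0
```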
2021-03-02T13:05:45Z
https://github.com/huggingface/datasets/pull/1966
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1966/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1965/comments
https://api.github.com/repos/huggingface/datasets/issues/1965/timeline
2021-03-04T19:40:42Z
null
completed
MDU6SXNzdWU4MTg4MzM0NjA=
closed
[]
null
1,965
{ "avatar_url": "https://avatars.githubusercontent.com/u/16892570?v=4", "events_url": "https://api.github.com/users/shamanez/events{/privacy}", "followers_url": "https://api.github.com/users/shamanez/followers", "following_url": "https://api.github.com/users/shamanez/following{/other_user}", "gists_url": "https://api.github.com/users/shamanez/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/shamanez", "id": 16892570, "login": "shamanez", "node_id": "MDQ6VXNlcjE2ODkyNTcw", "organizations_url": "https://api.github.com/users/shamanez/orgs", "received_events_url": "https://api.github.com/users/shamanez/received_events", "repos_url": "https://api.github.com/users/shamanez/repos", "site_admin": false, "starred_url": "https://api.github.com/users/shamanez/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/shamanez/subscriptions", "type": "User", "url": "https://api.github.com/users/shamanez" }
Can we parallelize the add_faiss_index process over dataset shards?
https://api.github.com/repos/huggingface/datasets/issues/1965/events
null
https://api.github.com/repos/huggingface/datasets/issues/1965/labels{/name}
2021-03-01T12:47:34Z
null
false
null
null
818,833,460
[]
https://api.github.com/repos/huggingface/datasets/issues/1965
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
I am thinking of making the **add_faiss_index** process faster. What if we run the add_faiss_index process on separate dataset shards and then combine them (with dataset concatenation) before saving the faiss.index file? I feel that, theoretically, this will reduce the accuracy of retrieval since it affects the indexing process. @lhoestq
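One way to sketch the idea without merging faiss index structures: index each shard independently and merge the search results instead. This is an assumption-level sketch, not a confirmed recipe, and "embeddings"/"id" are placeholder column names:

```python
def shard_search(dataset, query_embedding, num_shards=4, k=10):
    # Index each shard on its own (these calls could run in separate processes).
    shards = [dataset.shard(num_shards, i) for i in range(num_shards)]
    for s in shards:
        s.add_faiss_index(column="embeddings")
    # Query every shard and keep the global top-k (L2: lower score = closer).
    hits = []
    for s in shards:
        scores, examples = s.get_nearest_examples("embeddings", query_embedding, k=k)
        hits.extend(zip(scores, examples["id"]))
    return sorted(hits, key=lambda h: h[0])[:k]
```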
2021-03-04T19:40:56Z
https://github.com/huggingface/datasets/issues/1965
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1965/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1964/comments
https://api.github.com/repos/huggingface/datasets/issues/1964/timeline
2022-10-05T13:09:47Z
null
completed
MDU6SXNzdWU4MTg2MjQ4NjQ=
closed
[]
null
1,964
{ "avatar_url": "https://avatars.githubusercontent.com/u/44536699?v=4", "events_url": "https://api.github.com/users/LeopoldACC/events{/privacy}", "followers_url": "https://api.github.com/users/LeopoldACC/followers", "following_url": "https://api.github.com/users/LeopoldACC/following{/other_user}", "gists_url": "https://api.github.com/users/LeopoldACC/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/LeopoldACC", "id": 44536699, "login": "LeopoldACC", "node_id": "MDQ6VXNlcjQ0NTM2Njk5", "organizations_url": "https://api.github.com/users/LeopoldACC/orgs", "received_events_url": "https://api.github.com/users/LeopoldACC/received_events", "repos_url": "https://api.github.com/users/LeopoldACC/repos", "site_admin": false, "starred_url": "https://api.github.com/users/LeopoldACC/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LeopoldACC/subscriptions", "type": "User", "url": "https://api.github.com/users/LeopoldACC" }
Datasets.py function load_dataset does not match squad dataset
https://api.github.com/repos/huggingface/datasets/issues/1964/events
null
https://api.github.com/repos/huggingface/datasets/issues/1964/labels{/name}
2021-03-01T08:41:31Z
null
false
null
null
818,624,864
[]
https://api.github.com/repos/huggingface/datasets/issues/1964
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
### 1 When I try to train lxmert and follow the code in the README, using --dataset_name: ```shell python examples/question-answering/run_qa.py --model_name_or_path unc-nlp/lxmert-base-uncased --dataset_name squad --do_train --do_eval --per_device_train_batch_size 12 --learning_rate 3e-5 --num_train_epochs 2 --max_seq_length 384 --doc_stride 128 --output_dir /home2/zhenggo1/checkpoint/lxmert_squad ``` the bug is: ``` Downloading and preparing dataset squad/plain_text (download: 33.51 MiB, generated: 85.75 MiB, post-processed: Unknown size, total: 119.27 MiB) to /home2/zhenggo1/.cache/huggingface/datasets/squad/plain_text/1.0.0/4c81550d83a2ac7c7ce23783bd8ff36642800e6633c1f18417fb58c3ff50cdd7... Traceback (most recent call last): File "examples/question-answering/run_qa.py", line 501, in <module> main() File "examples/question-answering/run_qa.py", line 217, in main datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name) File "/home2/zhenggo1/anaconda3/envs/lpot/lib/python3.7/site-packages/datasets/load.py", line 746, in load_dataset use_auth_token=use_auth_token, File "/home2/zhenggo1/anaconda3/envs/lpot/lib/python3.7/site-packages/datasets/builder.py", line 573, in download_and_prepare dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/home2/zhenggo1/anaconda3/envs/lpot/lib/python3.7/site-packages/datasets/builder.py", line 633, in _download_and_prepare self.info.download_checksums, dl_manager.get_recorded_sizes_checksums(), "dataset source files" File "/home2/zhenggo1/anaconda3/envs/lpot/lib/python3.7/site-packages/datasets/utils/info_utils.py", line 39, in verify_checksums raise NonMatchingChecksumError(error_msg + str(bad_urls)) datasets.utils.info_utils.NonMatchingChecksumError: Checksums didn't match for dataset source files: ['https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json'] ``` I tried to find the [checksum link](https://github.com/huggingface/datasets/blob/master/datasets/squad/dataset_infos.json); is the problem that plain_text does not have a checksum? ### 2 When I try to train lxmert using a local dataset: ``` python examples/question-answering/run_qa.py --model_name_or_path unc-nlp/lxmert-base-uncased --train_file $SQUAD_DIR/train-v1.1.json --validation_file $SQUAD_DIR/dev-v1.1.json --do_train --do_eval --per_device_train_batch_size 12 --learning_rate 3e-5 --num_train_epochs 2 --max_seq_length 384 --doc_stride 128 --output_dir /home2/zhenggo1/checkpoint/lxmert_squad ``` The bug is: ``` ['title', 'paragraphs'] Traceback (most recent call last): File "examples/question-answering/run_qa.py", line 501, in <module> main() File "examples/question-answering/run_qa.py", line 273, in main answer_column_name = "answers" if "answers" in column_names else column_names[2] IndexError: list index out of range ``` I printed answer_column_name and found that a local squad dataset needs preprocessing by the datasets package so that the code below can work: ``` if training_args.do_train: column_names = datasets["train"].column_names else: column_names = datasets["validation"].column_names print(datasets["train"].column_names) question_column_name = "question" if "question" in column_names else column_names[0] context_column_name = "context" if "context" in column_names else column_names[1] answer_column_name = "answers" if "answers" in column_names else column_names[2] ``` ## Please tell me how to fix the bug, thanks a lot!
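For the first error, a common stopgap while the checksum mismatch is investigated is to skip verification (a sketch; this bypasses an integrity check, so use with care):

```python
from datasets import load_dataset

# Skips the recorded-checksum comparison that raises NonMatchingChecksumError.
datasets = load_dataset("squad", ignore_verifications=True)
print(datasets["train"].column_names)
```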
2022-10-05T13:09:47Z
https://github.com/huggingface/datasets/issues/1964
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1964/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1963/comments
https://api.github.com/repos/huggingface/datasets/issues/1963/timeline
2022-10-05T13:13:46Z
null
completed
MDU6SXNzdWU4MTgyODk5Njc=
closed
[]
null
1,963
{ "avatar_url": "https://avatars.githubusercontent.com/u/79165106?v=4", "events_url": "https://api.github.com/users/dorost1234/events{/privacy}", "followers_url": "https://api.github.com/users/dorost1234/followers", "following_url": "https://api.github.com/users/dorost1234/following{/other_user}", "gists_url": "https://api.github.com/users/dorost1234/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dorost1234", "id": 79165106, "login": "dorost1234", "node_id": "MDQ6VXNlcjc5MTY1MTA2", "organizations_url": "https://api.github.com/users/dorost1234/orgs", "received_events_url": "https://api.github.com/users/dorost1234/received_events", "repos_url": "https://api.github.com/users/dorost1234/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dorost1234/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dorost1234/subscriptions", "type": "User", "url": "https://api.github.com/users/dorost1234" }
Bug in SNLI dataset
https://api.github.com/repos/huggingface/datasets/issues/1963/events
null
https://api.github.com/repos/huggingface/datasets/issues/1963/labels{/name}
2021-02-28T19:36:20Z
null
false
null
null
818,289,967
[]
https://api.github.com/repos/huggingface/datasets/issues/1963
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi, there is a label of -1 in the train set of the SNLI dataset; please find the code below: ``` import numpy as np import datasets data = datasets.load_dataset("snli")["train"] labels = [] for d in data: labels.append(d["label"]) print(np.unique(labels)) ``` and the result: `[-1 0 1 2]` Version of datasets used: `datasets 1.2.1 <pip> ` Thanks for your help. @lhoestq
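The -1 values mark the examples from the original SNLI release that have no gold label (no annotator consensus); a sketch for dropping them before training:

```python
from datasets import load_dataset

train = load_dataset("snli", split="train")
train = train.filter(lambda example: example["label"] != -1)  # keep labeled rows
```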
2022-10-05T13:13:46Z
https://github.com/huggingface/datasets/issues/1963
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1963/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1962/comments
https://api.github.com/repos/huggingface/datasets/issues/1962/timeline
2021-03-03T16:37:50Z
null
null
MDExOlB1bGxSZXF1ZXN0NTgxNDQwNzM4
closed
[]
false
1,962
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
Fix unused arguments
https://api.github.com/repos/huggingface/datasets/issues/1962/events
null
https://api.github.com/repos/huggingface/datasets/issues/1962/labels{/name}
2021-02-28T02:47:07Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1962.diff", "html_url": "https://github.com/huggingface/datasets/pull/1962", "merged_at": "2021-03-03T16:37:50Z", "patch_url": "https://github.com/huggingface/datasets/pull/1962.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1962" }
818,089,156
[]
https://api.github.com/repos/huggingface/datasets/issues/1962
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
I noticed some args in the codebase are not used, so I found all such occurrences with Pylance and fixed them.
2021-03-11T02:18:17Z
https://github.com/huggingface/datasets/pull/1962
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1962/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1961/comments
https://api.github.com/repos/huggingface/datasets/issues/1961/timeline
2021-03-04T10:38:53Z
null
null
MDExOlB1bGxSZXF1ZXN0NTgxNDM3NDI0
closed
[]
false
1,961
{ "avatar_url": "https://avatars.githubusercontent.com/u/15801338?v=4", "events_url": "https://api.github.com/users/patpizio/events{/privacy}", "followers_url": "https://api.github.com/users/patpizio/followers", "following_url": "https://api.github.com/users/patpizio/following{/other_user}", "gists_url": "https://api.github.com/users/patpizio/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patpizio", "id": 15801338, "login": "patpizio", "node_id": "MDQ6VXNlcjE1ODAxMzM4", "organizations_url": "https://api.github.com/users/patpizio/orgs", "received_events_url": "https://api.github.com/users/patpizio/received_events", "repos_url": "https://api.github.com/users/patpizio/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patpizio/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patpizio/subscriptions", "type": "User", "url": "https://api.github.com/users/patpizio" }
Add sst dataset
https://api.github.com/repos/huggingface/datasets/issues/1961/events
null
https://api.github.com/repos/huggingface/datasets/issues/1961/labels{/name}
2021-02-28T02:08:29Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1961.diff", "html_url": "https://github.com/huggingface/datasets/pull/1961", "merged_at": "2021-03-04T10:38:53Z", "patch_url": "https://github.com/huggingface/datasets/pull/1961.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1961" }
818,077,947
[]
https://api.github.com/repos/huggingface/datasets/issues/1961
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Related to #1934: add the Stanford Sentiment Treebank dataset.
2021-03-04T10:38:53Z
https://github.com/huggingface/datasets/pull/1961
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1961/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1960/comments
https://api.github.com/repos/huggingface/datasets/issues/1960/timeline
2021-03-23T15:26:49Z
null
null
MDExOlB1bGxSZXF1ZXN0NTgxNDMzOTY4
closed
[]
false
1,960
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
Allow stateful function in dataset.map
https://api.github.com/repos/huggingface/datasets/issues/1960/events
null
https://api.github.com/repos/huggingface/datasets/issues/1960/labels{/name}
2021-02-28T01:29:05Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1960.diff", "html_url": "https://github.com/huggingface/datasets/pull/1960", "merged_at": "2021-03-23T15:26:49Z", "patch_url": "https://github.com/huggingface/datasets/pull/1960.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1960" }
818,073,154
[]
https://api.github.com/repos/huggingface/datasets/issues/1960
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Removes the "test type" section in Dataset.map which would modify the state of the stateful function. Now, the return type of the map function is inferred after processing the first example. Fixes #1940 @lhoestq Not very happy with the usage of `nonlocal`. Would like to hear your opinion on this.
2021-03-23T15:26:49Z
https://github.com/huggingface/datasets/pull/1960
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1960/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1959/comments
https://api.github.com/repos/huggingface/datasets/issues/1959/timeline
2021-03-09T10:21:32Z
null
completed
MDU6SXNzdWU4MTgwNTU2NDQ=
closed
[]
null
1,959
{ "avatar_url": "https://avatars.githubusercontent.com/u/73159756?v=4", "events_url": "https://api.github.com/users/LedaguenelArthur/events{/privacy}", "followers_url": "https://api.github.com/users/LedaguenelArthur/followers", "following_url": "https://api.github.com/users/LedaguenelArthur/following{/other_user}", "gists_url": "https://api.github.com/users/LedaguenelArthur/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/LedaguenelArthur", "id": 73159756, "login": "LedaguenelArthur", "node_id": "MDQ6VXNlcjczMTU5NzU2", "organizations_url": "https://api.github.com/users/LedaguenelArthur/orgs", "received_events_url": "https://api.github.com/users/LedaguenelArthur/received_events", "repos_url": "https://api.github.com/users/LedaguenelArthur/repos", "site_admin": false, "starred_url": "https://api.github.com/users/LedaguenelArthur/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LedaguenelArthur/subscriptions", "type": "User", "url": "https://api.github.com/users/LedaguenelArthur" }
Bug in skip_rows argument of load_dataset function?
https://api.github.com/repos/huggingface/datasets/issues/1959/events
null
https://api.github.com/repos/huggingface/datasets/issues/1959/labels{/name}
2021-02-27T23:32:54Z
null
false
null
null
818,055,644
[]
https://api.github.com/repos/huggingface/datasets/issues/1959
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hello everyone, I'm quite new to GitHub so sorry in advance if I'm breaking some ground rules of issue posting... :/ I tried to use the load_dataset function from the Huggingface datasets library on a csv file, with the skip_rows argument described on the Huggingface page to skip the first row containing column names: `test_dataset = load_dataset('csv', data_files=['test_wLabel.tsv'], delimiter='\t', column_names=["id", "sentence", "label"], skip_rows=1)` But I got the following error message: `__init__() got an unexpected keyword argument 'skip_rows'` Have I used the wrong argument? Am I missing something or is this a bug? Thank you very much for your time, Best regards, Arthur
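A workaround that sidesteps the unsupported kwarg (a sketch): drop `column_names` and let the CSV builder read the header names from that first row, since it does not accept `skip_rows`:

```python
from datasets import load_dataset

# The first row holds the column names, so let it serve as the header.
test_dataset = load_dataset("csv", data_files=["test_wLabel.tsv"], delimiter="\t")
```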
2021-03-09T10:21:32Z
https://github.com/huggingface/datasets/issues/1959
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1959/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1958/comments
https://api.github.com/repos/huggingface/datasets/issues/1958/timeline
2021-02-27T21:50:16Z
null
completed
MDU6SXNzdWU4MTgwMzc1NDg=
closed
[]
null
1,958
{ "avatar_url": "https://avatars.githubusercontent.com/u/1156974?v=4", "events_url": "https://api.github.com/users/himat/events{/privacy}", "followers_url": "https://api.github.com/users/himat/followers", "following_url": "https://api.github.com/users/himat/following{/other_user}", "gists_url": "https://api.github.com/users/himat/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/himat", "id": 1156974, "login": "himat", "node_id": "MDQ6VXNlcjExNTY5NzQ=", "organizations_url": "https://api.github.com/users/himat/orgs", "received_events_url": "https://api.github.com/users/himat/received_events", "repos_url": "https://api.github.com/users/himat/repos", "site_admin": false, "starred_url": "https://api.github.com/users/himat/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/himat/subscriptions", "type": "User", "url": "https://api.github.com/users/himat" }
XSum dataset download link broken
https://api.github.com/repos/huggingface/datasets/issues/1958/events
null
https://api.github.com/repos/huggingface/datasets/issues/1958/labels{/name}
2021-02-27T21:47:56Z
null
false
null
null
818,037,548
[]
https://api.github.com/repos/huggingface/datasets/issues/1958
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
I did ``` from datasets import load_dataset dataset = load_dataset("xsum") ``` This returns `ConnectionError: Couldn't reach http://bollin.inf.ed.ac.uk/public/direct/XSUM-EMNLP18-Summary-Data-Original.tar.gz`
2021-02-27T21:50:16Z
https://github.com/huggingface/datasets/issues/1958
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1958/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1956/comments
https://api.github.com/repos/huggingface/datasets/issues/1956/timeline
2021-03-01T17:24:42Z
null
completed
MDU6SXNzdWU4MTgwMTM3NDE=
closed
[]
null
1,956
{ "avatar_url": "https://avatars.githubusercontent.com/u/10676103?v=4", "events_url": "https://api.github.com/users/stas00/events{/privacy}", "followers_url": "https://api.github.com/users/stas00/followers", "following_url": "https://api.github.com/users/stas00/following{/other_user}", "gists_url": "https://api.github.com/users/stas00/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/stas00", "id": 10676103, "login": "stas00", "node_id": "MDQ6VXNlcjEwNjc2MTAz", "organizations_url": "https://api.github.com/users/stas00/orgs", "received_events_url": "https://api.github.com/users/stas00/received_events", "repos_url": "https://api.github.com/users/stas00/repos", "site_admin": false, "starred_url": "https://api.github.com/users/stas00/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stas00/subscriptions", "type": "User", "url": "https://api.github.com/users/stas00" }
[distributed env] potentially unsafe parallel execution
https://api.github.com/repos/huggingface/datasets/issues/1956/events
null
https://api.github.com/repos/huggingface/datasets/issues/1956/labels{/name}
2021-02-27T20:38:45Z
null
false
null
null
818,013,741
[]
https://api.github.com/repos/huggingface/datasets/issues/1956
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
``` metric = load_metric('glue', 'mrpc', num_process=num_process, process_id=rank) ``` presumes that there is only one set of parallel processes running, and it will intermittently fail if you have multiple sets running, as they will surely overwrite each other. Similar to https://github.com/huggingface/datasets/issues/1942 (but for a different reason). That's why distributed environments use an identifier unique to each group, so that each group is dealt with separately. E.g. the env-based way of pytorch distributed syncing is done with a per-set unique `MASTER_ADDRESS+MASTER_PORT`. So ideally this interface should ask for a shared secret to do the right thing. I'm not reporting an immediate need, but am only flagging that this will hit someone down the road. This problem can be remedied by adding a new optional `shared_secret` option, which can then be used to differentiate different groups of processes; this secret should be part of the file lock name and the experiment. Thank you
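For reference, `load_metric` already exposes an `experiment_id` parameter that can play the role of such a shared secret when each group sets its own value (a sketch):

```python
from datasets import load_metric

num_process, rank = 2, 0  # placeholders; take these from your distributed setup

# A distinct experiment_id per group keeps cache/lock files from colliding.
metric = load_metric(
    "glue", "mrpc",
    num_process=num_process,
    process_id=rank,
    experiment_id="run-a",  # the per-group "shared secret"
)
```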
2021-03-01T17:24:42Z
https://github.com/huggingface/datasets/issues/1956
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1956/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1955/comments
https://api.github.com/repos/huggingface/datasets/issues/1955/timeline
2021-03-01T14:43:19Z
null
null
MDExOlB1bGxSZXF1ZXN0NTgxMzk2OTA5
closed
[]
false
1,955
{ "avatar_url": "https://avatars.githubusercontent.com/u/10676103?v=4", "events_url": "https://api.github.com/users/stas00/events{/privacy}", "followers_url": "https://api.github.com/users/stas00/followers", "following_url": "https://api.github.com/users/stas00/following{/other_user}", "gists_url": "https://api.github.com/users/stas00/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/stas00", "id": 10676103, "login": "stas00", "node_id": "MDQ6VXNlcjEwNjc2MTAz", "organizations_url": "https://api.github.com/users/stas00/orgs", "received_events_url": "https://api.github.com/users/stas00/received_events", "repos_url": "https://api.github.com/users/stas00/repos", "site_admin": false, "starred_url": "https://api.github.com/users/stas00/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stas00/subscriptions", "type": "User", "url": "https://api.github.com/users/stas00" }
typos + grammar
https://api.github.com/repos/huggingface/datasets/issues/1955/events
null
https://api.github.com/repos/huggingface/datasets/issues/1955/labels{/name}
2021-02-27T20:21:43Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1955.diff", "html_url": "https://github.com/huggingface/datasets/pull/1955", "merged_at": "2021-03-01T14:43:19Z", "patch_url": "https://github.com/huggingface/datasets/pull/1955.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1955" }
818,010,664
[]
https://api.github.com/repos/huggingface/datasets/issues/1955
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
This PR proposes a few typo + grammar fixes and rewrites some sentences in an attempt to improve readability. N.B. When referring to the library `datasets` in the docs, it is typically used as a singular, and it definitely is singular when written as "`datasets` library", that is, "`datasets` library is ..." and not "are ...".
2021-03-01T17:20:38Z
https://github.com/huggingface/datasets/pull/1955
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1955/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1954/comments
https://api.github.com/repos/huggingface/datasets/issues/1954/timeline
2021-04-29T14:50:43Z
null
completed
MDU6SXNzdWU4MTc1NjU1NjM=
closed
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
1,954
{ "avatar_url": "https://avatars.githubusercontent.com/u/79165106?v=4", "events_url": "https://api.github.com/users/dorost1234/events{/privacy}", "followers_url": "https://api.github.com/users/dorost1234/followers", "following_url": "https://api.github.com/users/dorost1234/following{/other_user}", "gists_url": "https://api.github.com/users/dorost1234/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dorost1234", "id": 79165106, "login": "dorost1234", "node_id": "MDQ6VXNlcjc5MTY1MTA2", "organizations_url": "https://api.github.com/users/dorost1234/orgs", "received_events_url": "https://api.github.com/users/dorost1234/received_events", "repos_url": "https://api.github.com/users/dorost1234/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dorost1234/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dorost1234/subscriptions", "type": "User", "url": "https://api.github.com/users/dorost1234" }
add a new column
https://api.github.com/repos/huggingface/datasets/issues/1954/events
null
https://api.github.com/repos/huggingface/datasets/issues/1954/labels{/name}
2021-02-26T18:17:27Z
null
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
null
817,565,563
[]
https://api.github.com/repos/huggingface/datasets/issues/1954
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi, I'd need to add a new column to the dataset. I was wondering how this can be done? Thanks, @lhoestq
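A minimal sketch of one way to do this (the column names here are made up for illustration): `map()` adds a column because any new key in the returned dict becomes a new column in the resulting dataset.

```python
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "bb", "ccc"]})

# Any new key returned by the mapped function becomes a new column;
# the existing columns are left intact.
ds = ds.map(lambda example: {"text_length": len(example["text"])})

print(ds.column_names)  # ['text', 'text_length']
```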
2021-04-29T14:50:43Z
https://github.com/huggingface/datasets/issues/1954
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1954/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1953/comments
https://api.github.com/repos/huggingface/datasets/issues/1953/timeline
2021-03-01T14:03:47Z
null
null
MDExOlB1bGxSZXF1ZXN0NTgwOTgyMDMz
closed
[]
false
1,953
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Documentation for to_csv, to_pandas and to_dict
https://api.github.com/repos/huggingface/datasets/issues/1953/events
null
https://api.github.com/repos/huggingface/datasets/issues/1953/labels{/name}
2021-02-26T16:35:49Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1953.diff", "html_url": "https://github.com/huggingface/datasets/pull/1953", "merged_at": "2021-03-01T14:03:47Z", "patch_url": "https://github.com/huggingface/datasets/pull/1953.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1953" }
817,498,869
[]
https://api.github.com/repos/huggingface/datasets/issues/1953
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
I added these methods to the documentation with a small paragraph. I also fixed some formatting issues in the docstrings.
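For reference, a minimal sketch of the three export methods documented here (the file name is illustrative):

```python
from datasets import Dataset

ds = Dataset.from_dict({"id": [0, 1], "text": ["foo", "bar"]})

df = ds.to_pandas()   # pandas.DataFrame with one column per feature
d = ds.to_dict()      # {'id': [0, 1], 'text': ['foo', 'bar']}
ds.to_csv("out.csv")  # writes the whole dataset to a CSV file
```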
2021-03-01T14:03:48Z
https://github.com/huggingface/datasets/pull/1953
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1953/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1952/comments
https://api.github.com/repos/huggingface/datasets/issues/1952/timeline
2021-03-01T14:29:24Z
null
null
MDExOlB1bGxSZXF1ZXN0NTgwOTIyNjQw
closed
[]
false
1,952
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Handle timeouts
https://api.github.com/repos/huggingface/datasets/issues/1952/events
null
https://api.github.com/repos/huggingface/datasets/issues/1952/labels{/name}
2021-02-26T15:02:07Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1952.diff", "html_url": "https://github.com/huggingface/datasets/pull/1952", "merged_at": "2021-03-01T14:29:24Z", "patch_url": "https://github.com/huggingface/datasets/pull/1952.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1952" }
817,428,160
[]
https://api.github.com/repos/huggingface/datasets/issues/1952
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
As noticed in https://github.com/huggingface/datasets/issues/1939, timeouts were not properly handled when loading a dataset. This caused the connection to hang indefinitely when working in a firewalled environment (cc @stas00). I added a default timeout and extended our offline test environment so that tests can simulate both connection errors and timeout errors (previously it simulated connection errors only). Now network calls don't hang indefinitely. The default timeout is set to 10 seconds (we might reduce it).
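The underlying idea, sketched with plain `requests` (the constant name and helper below are illustrative, not the actual implementation):

```python
import requests

DEFAULT_TIMEOUT = 10.0  # seconds, matching the default described above

def http_head(url: str, timeout: float = DEFAULT_TIMEOUT) -> requests.Response:
    # A bounded timeout turns an indefinite hang in a firewalled
    # environment into a prompt requests.exceptions.Timeout.
    return requests.head(url, timeout=timeout)
```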
2021-03-01T14:29:24Z
https://github.com/huggingface/datasets/pull/1952
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1952/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1951/comments
https://api.github.com/repos/huggingface/datasets/issues/1951/timeline
2021-02-26T15:30:26Z
null
null
MDExOlB1bGxSZXF1ZXN0NTgwOTE4ODE2
closed
[]
false
1,951
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
Add cross-platform support for datasets-cli
https://api.github.com/repos/huggingface/datasets/issues/1951/events
null
https://api.github.com/repos/huggingface/datasets/issues/1951/labels{/name}
2021-02-26T14:56:25Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1951.diff", "html_url": "https://github.com/huggingface/datasets/pull/1951", "merged_at": "2021-02-26T15:30:26Z", "patch_url": "https://github.com/huggingface/datasets/pull/1951.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1951" }
817,423,573
[]
https://api.github.com/repos/huggingface/datasets/issues/1951
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
One thing I've noticed while going through the codebase is the usage of `scripts` in `setup.py`. This [answer](https://stackoverflow.com/a/28119736/14095927) on SO explains nicely why it's better to use `entry_points` instead of `scripts`. To add cross-platform support to the CLI, this PR replaces `scripts` with `entry_points` in `setup.py` and moves datasets-cli to src/datasets/commands/datasets_cli.py. All *.md and *.rst files are updated accordingly. The same changes were made in the transformers repo to add cross-platform support ([link to PR](https://github.com/huggingface/transformers/pull/4131)).
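A sketch of the `entry_points` approach described above (the exact module path and entry function are assumptions based on this PR's description):

```python
from setuptools import setup

setup(
    name="datasets",
    # A console_scripts entry point generates a proper executable on
    # every platform (including a .exe shim on Windows), unlike `scripts`.
    entry_points={
        "console_scripts": [
            "datasets-cli=datasets.commands.datasets_cli:main",
        ],
    },
)
```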
2021-03-11T02:18:26Z
https://github.com/huggingface/datasets/pull/1951
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1951/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1950/comments
https://api.github.com/repos/huggingface/datasets/issues/1950/timeline
2021-03-01T11:08:29Z
null
null
MDExOlB1bGxSZXF1ZXN0NTgwODExMjMz
closed
[]
false
1,950
{ "avatar_url": "https://avatars.githubusercontent.com/u/19718818?v=4", "events_url": "https://api.github.com/users/bhavitvyamalik/events{/privacy}", "followers_url": "https://api.github.com/users/bhavitvyamalik/followers", "following_url": "https://api.github.com/users/bhavitvyamalik/following{/other_user}", "gists_url": "https://api.github.com/users/bhavitvyamalik/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/bhavitvyamalik", "id": 19718818, "login": "bhavitvyamalik", "node_id": "MDQ6VXNlcjE5NzE4ODE4", "organizations_url": "https://api.github.com/users/bhavitvyamalik/orgs", "received_events_url": "https://api.github.com/users/bhavitvyamalik/received_events", "repos_url": "https://api.github.com/users/bhavitvyamalik/repos", "site_admin": false, "starred_url": "https://api.github.com/users/bhavitvyamalik/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhavitvyamalik/subscriptions", "type": "User", "url": "https://api.github.com/users/bhavitvyamalik" }
updated multi_nli dataset with missing fields
https://api.github.com/repos/huggingface/datasets/issues/1950/events
null
https://api.github.com/repos/huggingface/datasets/issues/1950/labels{/name}
2021-02-26T11:54:36Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1950.diff", "html_url": "https://github.com/huggingface/datasets/pull/1950", "merged_at": "2021-03-01T11:08:29Z", "patch_url": "https://github.com/huggingface/datasets/pull/1950.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1950" }
817,295,235
[]
https://api.github.com/repos/huggingface/datasets/issues/1950
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
1) updated fields that were missing earlier, 2) added tags to the README, 3) updated a few fields of the README, 4) added a new dataset_infos.json and dummy files
2021-03-01T11:08:30Z
https://github.com/huggingface/datasets/pull/1950
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1950/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1949/comments
https://api.github.com/repos/huggingface/datasets/issues/1949/timeline
null
null
null
MDU6SXNzdWU4MTY5ODY5MzY=
open
[]
null
1,949
{ "avatar_url": "https://avatars.githubusercontent.com/u/29076344?v=4", "events_url": "https://api.github.com/users/gchhablani/events{/privacy}", "followers_url": "https://api.github.com/users/gchhablani/followers", "following_url": "https://api.github.com/users/gchhablani/following{/other_user}", "gists_url": "https://api.github.com/users/gchhablani/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gchhablani", "id": 29076344, "login": "gchhablani", "node_id": "MDQ6VXNlcjI5MDc2MzQ0", "organizations_url": "https://api.github.com/users/gchhablani/orgs", "received_events_url": "https://api.github.com/users/gchhablani/received_events", "repos_url": "https://api.github.com/users/gchhablani/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gchhablani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gchhablani/subscriptions", "type": "User", "url": "https://api.github.com/users/gchhablani" }
Enable Fast Filtering using Arrow Dataset
https://api.github.com/repos/huggingface/datasets/issues/1949/events
null
https://api.github.com/repos/huggingface/datasets/issues/1949/labels{/name}
2021-02-26T02:53:37Z
null
false
null
null
816,986,936
[]
https://api.github.com/repos/huggingface/datasets/issues/1949
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Hi @lhoestq, As mentioned in Issue #1796, I would love to work on enabling fast filtering/mapping. Can you please share the expectations? It would be great if you could point me to the relevant methods/files involved. Or the docs or maybe an overview of `arrow_dataset.py`. I only ask this because I am having trouble getting started ;-; Any help would be appreciated. Thanks, Gunjan
2021-02-26T19:18:29Z
https://github.com/huggingface/datasets/issues/1949
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1949/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1948/comments
https://api.github.com/repos/huggingface/datasets/issues/1948/timeline
2023-07-12T17:19:30Z
null
completed
MDU6SXNzdWU4MTY2ODkzMjk=
closed
[]
null
1,948
{ "avatar_url": "https://avatars.githubusercontent.com/u/10676103?v=4", "events_url": "https://api.github.com/users/stas00/events{/privacy}", "followers_url": "https://api.github.com/users/stas00/followers", "following_url": "https://api.github.com/users/stas00/following{/other_user}", "gists_url": "https://api.github.com/users/stas00/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/stas00", "id": 10676103, "login": "stas00", "node_id": "MDQ6VXNlcjEwNjc2MTAz", "organizations_url": "https://api.github.com/users/stas00/orgs", "received_events_url": "https://api.github.com/users/stas00/received_events", "repos_url": "https://api.github.com/users/stas00/repos", "site_admin": false, "starred_url": "https://api.github.com/users/stas00/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stas00/subscriptions", "type": "User", "url": "https://api.github.com/users/stas00" }
dataset loading logger level
https://api.github.com/repos/huggingface/datasets/issues/1948/events
null
https://api.github.com/repos/huggingface/datasets/issues/1948/labels{/name}
2021-02-25T18:33:37Z
null
false
null
null
816,689,329
[]
https://api.github.com/repos/huggingface/datasets/issues/1948
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
On master I get this with `--dataset_name wmt16 --dataset_config ro-en`: ``` WARNING:datasets.arrow_dataset:Loading cached processed dataset at /home/stas/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/9dc00622c30446e99c4c63d12a484ea4fb653f2f37c867d6edcec839d7eae50f/cache-2e01bead8cf42e26.arrow WARNING:datasets.arrow_dataset:Loading cached processed dataset at /home/stas/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/9dc00622c30446e99c4c63d12a484ea4fb653f2f37c867d6edcec839d7eae50f/cache-ac3bebaf4f91f776.arrow WARNING:datasets.arrow_dataset:Loading cached processed dataset at /home/stas/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/9dc00622c30446e99c4c63d12a484ea4fb653f2f37c867d6edcec839d7eae50f/cache-810c3e61259d73a9.arrow ``` Why are those WARNINGs? Shouldn't they be INFO? Warnings should only be used when a user needs to pay attention to something; this is just informative - I'd even say it should be DEBUG, but definitely not WARNING. Thank you.
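In the meantime, the messages can be silenced on the user side; a minimal sketch using the library's logging helpers:

```python
import datasets

# Raise the library's log level so cache-reuse messages are hidden;
# set_verbosity_info() / set_verbosity_debug() would show them again.
datasets.logging.set_verbosity_error()
```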
2023-07-12T17:19:30Z
https://github.com/huggingface/datasets/issues/1948
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1948/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1947/comments
https://api.github.com/repos/huggingface/datasets/issues/1947/timeline
2021-03-01T14:36:53Z
null
null
MDExOlB1bGxSZXF1ZXN0NTgwMjI2MDk5
closed
[]
false
1,947
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Update documentation with not in place transforms and update DatasetDict
https://api.github.com/repos/huggingface/datasets/issues/1947/events
null
https://api.github.com/repos/huggingface/datasets/issues/1947/labels{/name}
2021-02-25T16:23:18Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1947.diff", "html_url": "https://github.com/huggingface/datasets/pull/1947", "merged_at": "2021-03-01T14:36:53Z", "patch_url": "https://github.com/huggingface/datasets/pull/1947.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1947" }
816,590,299
[]
https://api.github.com/repos/huggingface/datasets/issues/1947
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
In #1883, the not-in-place transforms `flatten`, `remove_columns`, `rename_column` and `cast` were added. I added them to the documentation along with a paragraph on how to use them. You can preview the documentation [here](https://28862-250213286-gh.circle-artifacts.com/0/docs/_build/html/processing.html#renaming-removing-casting-and-flattening-columns). I also added these methods to the DatasetDict class.
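A quick sketch of the documented behaviour (the column names are illustrative):

```python
from datasets import Dataset

ds = Dataset.from_dict({"a": [1, 2], "b": ["x", "y"]})

# Each transform returns a new dataset; the original is left untouched.
renamed = ds.rename_column("a", "id")
trimmed = renamed.remove_columns(["b"])

print(ds.column_names)       # ['a', 'b']
print(trimmed.column_names)  # ['id']
```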
2021-03-01T14:36:54Z
https://github.com/huggingface/datasets/pull/1947
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1947/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1946/comments
https://api.github.com/repos/huggingface/datasets/issues/1946/timeline
2021-03-12T09:42:48Z
null
null
MDExOlB1bGxSZXF1ZXN0NTgwMTcyNzI2
closed
[]
false
1,946
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
Implement Dataset from CSV
https://api.github.com/repos/huggingface/datasets/issues/1946/events
null
https://api.github.com/repos/huggingface/datasets/issues/1946/labels{/name}
2021-02-25T15:10:13Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1946.diff", "html_url": "https://github.com/huggingface/datasets/pull/1946", "merged_at": "2021-03-12T09:42:48Z", "patch_url": "https://github.com/huggingface/datasets/pull/1946.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1946" }
816,526,294
[]
https://api.github.com/repos/huggingface/datasets/issues/1946
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
Implement `Dataset.from_csv`. Analogous to #1943. If in the end the loading scripts should be used instead, at least we can reuse the tests here.
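A sketch of the intended usage, assuming the method lands with this signature (the file name is illustrative):

```python
from datasets import Dataset

# Reads a CSV file directly into an in-memory Dataset,
# without going through a loading script.
ds = Dataset.from_csv("train.csv")
```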
2021-03-12T09:42:48Z
https://github.com/huggingface/datasets/pull/1946
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1946/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1945/comments
https://api.github.com/repos/huggingface/datasets/issues/1945/timeline
2021-02-25T13:20:26Z
null
completed
MDU6SXNzdWU4MTY0MjE5NjY=
closed
[]
null
1,945
{ "avatar_url": "https://avatars.githubusercontent.com/u/79165106?v=4", "events_url": "https://api.github.com/users/dorost1234/events{/privacy}", "followers_url": "https://api.github.com/users/dorost1234/followers", "following_url": "https://api.github.com/users/dorost1234/following{/other_user}", "gists_url": "https://api.github.com/users/dorost1234/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dorost1234", "id": 79165106, "login": "dorost1234", "node_id": "MDQ6VXNlcjc5MTY1MTA2", "organizations_url": "https://api.github.com/users/dorost1234/orgs", "received_events_url": "https://api.github.com/users/dorost1234/received_events", "repos_url": "https://api.github.com/users/dorost1234/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dorost1234/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dorost1234/subscriptions", "type": "User", "url": "https://api.github.com/users/dorost1234" }
AttributeError: 'DatasetDict' object has no attribute 'concatenate_datasets'
https://api.github.com/repos/huggingface/datasets/issues/1945/events
null
https://api.github.com/repos/huggingface/datasets/issues/1945/labels{/name}
2021-02-25T13:09:45Z
null
false
null
null
816,421,966
[]
https://api.github.com/repos/huggingface/datasets/issues/1945
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Hi, I am trying to concatenate a list of Hugging Face datasets as: ` train_dataset = datasets.concatenate_datasets(train_datasets) ` Here is `train_datasets` when I print it: ``` [Dataset({ features: ['attention_mask', 'idx', 'input_ids', 'label', 'question1', 'question2', 'token_type_ids'], num_rows: 120361 }), Dataset({ features: ['attention_mask', 'idx', 'input_ids', 'label', 'question1', 'question2', 'token_type_ids'], num_rows: 2670 }), Dataset({ features: ['attention_mask', 'idx', 'input_ids', 'label', 'question1', 'question2', 'token_type_ids'], num_rows: 6944 }), Dataset({ features: ['attention_mask', 'idx', 'input_ids', 'label', 'question1', 'question2', 'token_type_ids'], num_rows: 38140 }), Dataset({ features: ['attention_mask', 'idx', 'input_ids', 'label', 'question1', 'question2', 'token_type_ids'], num_rows: 173711 }), Dataset({ features: ['attention_mask', 'idx', 'input_ids', 'label', 'question1', 'question2', 'token_type_ids'], num_rows: 1655 }), Dataset({ features: ['attention_mask', 'idx', 'input_ids', 'label', 'question1', 'question2', 'token_type_ids'], num_rows: 4274 }), Dataset({ features: ['attention_mask', 'idx', 'input_ids', 'label', 'question1', 'question2', 'token_type_ids'], num_rows: 2019 }), Dataset({ features: ['attention_mask', 'idx', 'input_ids', 'label', 'question1', 'question2', 'token_type_ids'], num_rows: 2109 }), Dataset({ features: ['attention_mask', 'idx', 'input_ids', 'label', 'question1', 'question2', 'token_type_ids'], num_rows: 11963 })] ``` I am getting the following error: `AttributeError: 'DatasetDict' object has no attribute 'concatenate_datasets' ` I was wondering if you could help me with this issue. Thanks a lot!
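For what it's worth, this error usually means the name `datasets` is bound to a `DatasetDict` (e.g. the result of `load_dataset`) that shadows the library module - a hedged guess based on the message alone. Importing the function directly avoids the ambiguity:

```python
from datasets import Dataset, concatenate_datasets

# concatenate_datasets is a module-level function taking a list of
# Dataset objects; it is not a method on Dataset or DatasetDict.
parts = [Dataset.from_dict({"x": [1]}), Dataset.from_dict({"x": [2]})]
combined = concatenate_datasets(parts)
print(combined.num_rows)  # 2
```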
2021-02-25T13:20:35Z
https://github.com/huggingface/datasets/issues/1945
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1945/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1944/comments
https://api.github.com/repos/huggingface/datasets/issues/1944/timeline
2021-03-01T18:23:21Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc5OTU2Nzc3
closed
[]
false
1,944
{ "avatar_url": "https://avatars.githubusercontent.com/u/5150963?v=4", "events_url": "https://api.github.com/users/yavuzKomecoglu/events{/privacy}", "followers_url": "https://api.github.com/users/yavuzKomecoglu/followers", "following_url": "https://api.github.com/users/yavuzKomecoglu/following{/other_user}", "gists_url": "https://api.github.com/users/yavuzKomecoglu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yavuzKomecoglu", "id": 5150963, "login": "yavuzKomecoglu", "node_id": "MDQ6VXNlcjUxNTA5NjM=", "organizations_url": "https://api.github.com/users/yavuzKomecoglu/orgs", "received_events_url": "https://api.github.com/users/yavuzKomecoglu/received_events", "repos_url": "https://api.github.com/users/yavuzKomecoglu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yavuzKomecoglu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yavuzKomecoglu/subscriptions", "type": "User", "url": "https://api.github.com/users/yavuzKomecoglu" }
Add Turkish News Category Dataset (270K - Lite Version)
https://api.github.com/repos/huggingface/datasets/issues/1944/events
null
https://api.github.com/repos/huggingface/datasets/issues/1944/labels{/name}
2021-02-25T09:45:22Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1944.diff", "html_url": "https://github.com/huggingface/datasets/pull/1944", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/1944.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1944" }
816,267,216
[]
https://api.github.com/repos/huggingface/datasets/issues/1944
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
This PR adds the Turkish News Category Dataset (270K - Lite Version), a text classification dataset by me, @basakbuluz, and @serdarakyol. It contains the same news as the current [interpress_news_category_tr dataset](https://huggingface.co/datasets/interpress_news_category_tr), but with less information and fewer OCR errors; the articles can be easily separated and were rearranged into 10 classes ("kültürsanat", "ekonomi", "siyaset", "eğitim", "dünya", "spor", "teknoloji", "magazin", "sağlık", "gündem"). @SBrandeis @lhoestq, can you please review this PR?
2021-03-02T17:46:41Z
https://github.com/huggingface/datasets/pull/1944
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1944/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1943/comments
https://api.github.com/repos/huggingface/datasets/issues/1943/timeline
2021-03-18T09:42:08Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc5ODY5NTk0
closed
[]
false
1,943
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
Implement Dataset from JSON and JSON Lines
https://api.github.com/repos/huggingface/datasets/issues/1943/events
null
https://api.github.com/repos/huggingface/datasets/issues/1943/labels{/name}
2021-02-25T07:17:33Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1943.diff", "html_url": "https://github.com/huggingface/datasets/pull/1943", "merged_at": "2021-03-18T09:42:08Z", "patch_url": "https://github.com/huggingface/datasets/pull/1943.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1943" }
816,160,453
[]
https://api.github.com/repos/huggingface/datasets/issues/1943
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
Implement `Dataset.from_jsonl`.
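A sketch of the intended usage; the method name is taken from this PR's title and may change before merging (e.g. to a lines-mode default of a single JSON reader):

```python
from datasets import Dataset

# Each line of data.jsonl is one JSON object, i.e. one dataset row.
ds = Dataset.from_jsonl("data.jsonl")
```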
2021-03-18T09:42:08Z
https://github.com/huggingface/datasets/pull/1943
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1943/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1942/comments
https://api.github.com/repos/huggingface/datasets/issues/1942/timeline
2022-10-05T13:08:45Z
null
completed
MDU6SXNzdWU4MTYwMzc1MjA=
closed
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" } ]
null
1,942
{ "avatar_url": "https://avatars.githubusercontent.com/u/10676103?v=4", "events_url": "https://api.github.com/users/stas00/events{/privacy}", "followers_url": "https://api.github.com/users/stas00/followers", "following_url": "https://api.github.com/users/stas00/following{/other_user}", "gists_url": "https://api.github.com/users/stas00/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/stas00", "id": 10676103, "login": "stas00", "node_id": "MDQ6VXNlcjEwNjc2MTAz", "organizations_url": "https://api.github.com/users/stas00/orgs", "received_events_url": "https://api.github.com/users/stas00/received_events", "repos_url": "https://api.github.com/users/stas00/repos", "site_admin": false, "starred_url": "https://api.github.com/users/stas00/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stas00/subscriptions", "type": "User", "url": "https://api.github.com/users/stas00" }
[experiment] missing default_experiment-1-0.arrow
https://api.github.com/repos/huggingface/datasets/issues/1942/events
null
https://api.github.com/repos/huggingface/datasets/issues/1942/labels{/name}
2021-02-25T03:02:15Z
null
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
null
816,037,520
[]
https://api.github.com/repos/huggingface/datasets/issues/1942
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
The original report was pretty bad and incomplete - my apologies! Please see the complete version here: https://github.com/huggingface/datasets/issues/1942#issuecomment-786336481 ------------ As mentioned here https://github.com/huggingface/datasets/issues/1939, metrics don't get cached; looking at my local `~/.cache/huggingface/metrics` - there are many `*.arrow.lock` files but zero metrics files. Without the network I get: ``` FileNotFoundError: [Errno 2] No such file or directory: '~/.cache/huggingface/metrics/sacrebleu/default/default_experiment-1-0.arrow ``` There is just `~/.cache/huggingface/metrics/sacrebleu/default/default_experiment-1-0.arrow.lock`. I did run the same `run_seq2seq.py` script on an instance with network access and it worked just fine, but only the lock file was left behind. This is with master. Thank you.
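A minimal repro sketch (assuming `load_metric` and the sacrebleu input format): compute the metric once online, then rerun the same call offline and observe that only the `.lock` file exists in the cache.

```python
from datasets import load_metric

metric = load_metric("sacrebleu")
# sacrebleu expects a list of prediction strings and, per prediction,
# a list of reference strings.
metric.add_batch(predictions=["hello there"], references=[["hello there"]])
print(metric.compute()["score"])  # works online; offline only *.lock exists
```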
2022-10-05T13:08:45Z
https://github.com/huggingface/datasets/issues/1942
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1942/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1941/comments
https://api.github.com/repos/huggingface/datasets/issues/1941/timeline
2021-02-25T14:28:46Z
null
completed
MDU6SXNzdWU4MTU5ODUxNjc=
closed
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" } ]
null
1,941
{ "avatar_url": "https://avatars.githubusercontent.com/u/2992022?v=4", "events_url": "https://api.github.com/users/mkserge/events{/privacy}", "followers_url": "https://api.github.com/users/mkserge/followers", "following_url": "https://api.github.com/users/mkserge/following{/other_user}", "gists_url": "https://api.github.com/users/mkserge/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mkserge", "id": 2992022, "login": "mkserge", "node_id": "MDQ6VXNlcjI5OTIwMjI=", "organizations_url": "https://api.github.com/users/mkserge/orgs", "received_events_url": "https://api.github.com/users/mkserge/received_events", "repos_url": "https://api.github.com/users/mkserge/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mkserge/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mkserge/subscriptions", "type": "User", "url": "https://api.github.com/users/mkserge" }
Loading of FAISS index fails for index_name = 'exact'
https://api.github.com/repos/huggingface/datasets/issues/1941/events
null
https://api.github.com/repos/huggingface/datasets/issues/1941/labels{/name}
2021-02-25T01:30:54Z
null
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
null
815,985,167
[]
https://api.github.com/repos/huggingface/datasets/issues/1941
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Hi, it looks like loading of a FAISS index now fails when using index_name = 'exact'. For example, from the RAG [model card](https://huggingface.co/facebook/rag-token-nq?fbclid=IwAR3bTfhls5U_t9DqsX2Vzb7NhtRHxJxfQ-uwFT7VuCPMZUM2AdAlKF_qkI8#usage). Running `transformers==4.3.2` and datasets installed from source on latest `master` branch. ```bash (venv) sergey_mkrtchyan datasets (master) $ python Python 3.8.6 (v3.8.6:db455296be, Sep 23 2020, 13:31:39) [Clang 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> from transformers import RagTokenizer, RagRetriever, RagTokenForGeneration >>> tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq") >>> retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True) Using custom data configuration dummy.psgs_w100.nq.no_index-dummy=True,with_index=False Reusing dataset wiki_dpr (/Users/sergey_mkrtchyan/.cache/huggingface/datasets/wiki_dpr/dummy.psgs_w100.nq.no_index-dummy=True,with_index=False/0.0.0/8a97e0f4fa5bc46e179474db6a61b09d5d2419d2911835bd3f91d110c936d8bb) Using custom data configuration dummy.psgs_w100.nq.exact-50b6cda57ff32ab4 Reusing dataset wiki_dpr (/Users/sergey_mkrtchyan/.cache/huggingface/datasets/wiki_dpr/dummy.psgs_w100.nq.exact-50b6cda57ff32ab4/0.0.0/8a97e0f4fa5bc46e179474db6a61b09d5d2419d2911835bd3f91d110c936d8bb) 0%| | 0/10 [00:00<?, ?it/s] Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/sergey_mkrtchyan/workspace/cformers/venv/lib/python3.8/site-packages/transformers/models/rag/retrieval_rag.py", line 425, in from_pretrained return cls( File "/Users/sergey_mkrtchyan/workspace/cformers/venv/lib/python3.8/site-packages/transformers/models/rag/retrieval_rag.py", line 387, in __init__ self.init_retrieval() File "/Users/sergey_mkrtchyan/workspace/cformers/venv/lib/python3.8/site-packages/transformers/models/rag/retrieval_rag.py", line 458, in init_retrieval self.index.init_index() File "/Users/sergey_mkrtchyan/workspace/cformers/venv/lib/python3.8/site-packages/transformers/models/rag/retrieval_rag.py", line 284, in init_index self.dataset = load_dataset( File "/Users/sergey_mkrtchyan/workspace/huggingface/datasets/src/datasets/load.py", line 750, in load_dataset ds = builder_instance.as_dataset(split=split, ignore_verifications=ignore_verifications, in_memory=keep_in_memory) File "/Users/sergey_mkrtchyan/workspace/huggingface/datasets/src/datasets/builder.py", line 734, in as_dataset datasets = utils.map_nested( File "/Users/sergey_mkrtchyan/workspace/huggingface/datasets/src/datasets/utils/py_utils.py", line 195, in map_nested return function(data_struct) File "/Users/sergey_mkrtchyan/workspace/huggingface/datasets/src/datasets/builder.py", line 769, in _build_single_dataset post_processed = self._post_process(ds, resources_paths) File "/Users/sergey_mkrtchyan/.cache/huggingface/modules/datasets_modules/datasets/wiki_dpr/8a97e0f4fa5bc46e179474db6a61b09d5d2419d2911835bd3f91d110c936d8bb/wiki_dpr.py", line 205, in _post_process dataset.add_faiss_index("embeddings", custom_index=index) File "/Users/sergey_mkrtchyan/workspace/huggingface/datasets/src/datasets/arrow_dataset.py", line 2516, in add_faiss_index super().add_faiss_index( File "/Users/sergey_mkrtchyan/workspace/huggingface/datasets/src/datasets/search.py", line 416, in add_faiss_index faiss_index.add_vectors(self, column=column, train_size=train_size, faiss_verbose=faiss_verbose) File "/Users/sergey_mkrtchyan/workspace/huggingface/datasets/src/datasets/search.py", line 281, in add_vectors self.faiss_index.add(vecs) File "/Users/sergey_mkrtchyan/workspace/cformers/venv/lib/python3.8/site-packages/faiss/__init__.py", line 104, in replacement_add self.add_c(n, swig_ptr(x)) File "/Users/sergey_mkrtchyan/workspace/cformers/venv/lib/python3.8/site-packages/faiss/swigfaiss.py", line 3263, in add return _swigfaiss.IndexHNSW_add(self, n, x) RuntimeError: Error in virtual void faiss::IndexHNSW::add(faiss::Index::idx_t, const float *) at /Users/runner/work/faiss-wheels/faiss-wheels/faiss/faiss/IndexHNSW.cpp:356: Error: 'is_trained' failed >>> ``` The issue seems to be related to the scalar quantization in faiss added in this commit: 8c5220307c33f00e01c3bf7b8. Reverting it fixes the issue.
2021-02-25T14:28:46Z
https://github.com/huggingface/datasets/issues/1941
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1941/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1940/comments
https://api.github.com/repos/huggingface/datasets/issues/1940/timeline
2021-03-23T15:26:49Z
null
completed
MDU6SXNzdWU4MTU3NzAwMTI=
closed
[]
null
1,940
{ "avatar_url": "https://avatars.githubusercontent.com/u/918006?v=4", "events_url": "https://api.github.com/users/francisco-perez-sorrosal/events{/privacy}", "followers_url": "https://api.github.com/users/francisco-perez-sorrosal/followers", "following_url": "https://api.github.com/users/francisco-perez-sorrosal/following{/other_user}", "gists_url": "https://api.github.com/users/francisco-perez-sorrosal/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/francisco-perez-sorrosal", "id": 918006, "login": "francisco-perez-sorrosal", "node_id": "MDQ6VXNlcjkxODAwNg==", "organizations_url": "https://api.github.com/users/francisco-perez-sorrosal/orgs", "received_events_url": "https://api.github.com/users/francisco-perez-sorrosal/received_events", "repos_url": "https://api.github.com/users/francisco-perez-sorrosal/repos", "site_admin": false, "starred_url": "https://api.github.com/users/francisco-perez-sorrosal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/francisco-perez-sorrosal/subscriptions", "type": "User", "url": "https://api.github.com/users/francisco-perez-sorrosal" }
Side effect when filtering data due to `does_function_return_dict` call in `Dataset.map()`
https://api.github.com/repos/huggingface/datasets/issues/1940/events
null
https://api.github.com/repos/huggingface/datasets/issues/1940/labels{/name}
2021-02-24T19:18:56Z
null
false
null
null
815,770,012
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
https://api.github.com/repos/huggingface/datasets/issues/1940
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Hi there! In my codebase I have a function to filter rows in a dataset, selecting only a certain number of examples per class. The function takes an extra argument to maintain a counter of the number of dataset rows/examples already selected per class, which are the ones I want to keep in the end: ```python def fill_train_examples_per_class(example, per_class_limit: int, counter: collections.Counter): label = int(example['label']) current_counter = counter.get(label, 0) if current_counter < per_class_limit: counter[label] = current_counter + 1 return True return False ``` At some point I invoke it through the `Dataset.filter()` method in the `arrow_dataset.py` module like this: ```python ... kwargs = {"per_class_limit": train_examples_per_class_limit, "counter": Counter()} datasets['train'] = datasets['train'].filter(fill_train_examples_per_class, num_proc=1, fn_kwargs=kwargs) ... ``` The problem is that passing a stateful container (the counter) causes a side effect in the new filtered dataset. This is due to the fact that at some point in `filter()`, `map()`'s helper `does_function_return_dict` is invoked in line [1290](https://github.com/huggingface/datasets/blob/96578adface7e4bc1f3e8bafbac920d72ca1ca60/src/datasets/arrow_dataset.py#L1290). When this occurs, the state of the counter is modified by the effects of the function call on the 1 or 2 rows selected in lines 1288 and 1289 of the same file (marked as `test_inputs` and `test_indices` respectively). This happens out of the control of the user (who, for example, can't reset the state of the counter before continuing the execution), producing in the end an undesired side effect in the results obtained. In my case, the resulting dataset - although the counter results are OK - lacks an instance of classes 0 and 1 (which happen to be the classes of the first two examples of my dataset). The rest of the classes in my dataset contain the right number of examples, as they were not affected by the `does_function_return_dict` call. I've debugged my code extensively and made a workaround myself, hardcoding the necessary stuff (basically putting `update_data=True` in line 1290), and then I obtain the results I expected without the side effect. Is there a way to avoid that call to `does_function_return_dict` in `map()`'s line 1290? (e.g. extracting the required information that `does_function_return_dict` returns without making the test calls to the user function on dataset rows 0 & 1) Thanks in advance, Francisco Perez-Sorrosal
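For anyone hitting the same thing, a hedged workaround sketch that keeps the counter out of `filter()` entirely: compute the keep-indices in a plain loop and materialize the subset with `Dataset.select` (reusing the variable names from the snippets above):

```python
import collections

counter = collections.Counter()
keep = []
for i, example in enumerate(datasets["train"]):
    label = int(example["label"])
    if counter[label] < train_examples_per_class_limit:
        counter[label] += 1
        keep.append(i)

# select() builds the subset without any hidden test invocations of a
# user function, so the counter state stays entirely under our control.
datasets["train"] = datasets["train"].select(keep)
```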
2021-03-23T15:26:49Z
https://github.com/huggingface/datasets/issues/1940
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1940/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1939/comments
https://api.github.com/repos/huggingface/datasets/issues/1939/timeline
2021-03-05T05:09:54Z
null
completed
MDU6SXNzdWU4MTU2ODA1MTA=
closed
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" } ]
null
1,939
{ "avatar_url": "https://avatars.githubusercontent.com/u/10676103?v=4", "events_url": "https://api.github.com/users/stas00/events{/privacy}", "followers_url": "https://api.github.com/users/stas00/followers", "following_url": "https://api.github.com/users/stas00/following{/other_user}", "gists_url": "https://api.github.com/users/stas00/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/stas00", "id": 10676103, "login": "stas00", "node_id": "MDQ6VXNlcjEwNjc2MTAz", "organizations_url": "https://api.github.com/users/stas00/orgs", "received_events_url": "https://api.github.com/users/stas00/received_events", "repos_url": "https://api.github.com/users/stas00/repos", "site_admin": false, "starred_url": "https://api.github.com/users/stas00/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stas00/subscriptions", "type": "User", "url": "https://api.github.com/users/stas00" }
[firewalled env] OFFLINE mode
https://api.github.com/repos/huggingface/datasets/issues/1939/events
null
https://api.github.com/repos/huggingface/datasets/issues/1939/labels{/name}
2021-02-24T17:13:42Z
null
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
null
815,680,510
[]
https://api.github.com/repos/huggingface/datasets/issues/1939
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
This issue comes from a need to be able to run `datasets` in a firewalled env, which currently makes the software hang until it times out, as it's unable to complete the network calls. I propose the following approach to solving this problem, using the example of `run_seq2seq.py` as a sample program. There are 2 possible ways of going about it. ## 1. Manual Manually prepare the data and metrics files, that is, transfer the dataset and the metrics to the firewalled instance, and run: ``` DATASETS_OFFLINE=1 run_seq2seq.py --train_file xyz.csv --validation_file xyz.csv ... ``` `datasets` must not make any network calls, and if some logic tries to and something is missing, it should assert that this or that action requires network access and therefore it can't proceed. ## 2. Automatic In some clouds one can prepare a data storage ahead of time with a normal networked environment but which doesn't have gpus, and then one switches to the gpu instance which is firewalled, but it can access all the cached data. This is the ideal situation, since in this scenario we don't have to do anything manually, but simply run the same application twice: 1. on the non-firewalled instance: ``` run_seq2seq.py --dataset_name wmt16 --dataset_config ro-en ... ``` which should download and cache everything. 2. and then immediately after on the firewalled instance, which shares the same filesystem: ``` DATASETS_OFFLINE=1 run_seq2seq.py --dataset_name wmt16 --dataset_config ro-en ... ``` The metrics and datasets should be cached by invocation number 1, any network calls should be skipped, and if the logic is missing data it should assert and not try to fetch anything online. ## Common Issues 1. For example, currently `datasets` tries to look up datasets online if the files contain json or csv, despite the paths already being provided ``` if dataset and path in _PACKAGED_DATASETS_MODULES: ``` 2. It has an issue with metrics, e.g. I had to manually copy `rouge/rouge.py` from the `datasets` repo to the current dir - or it was hanging. I had to comment out `head_hf_s3(...)` calls to make things work. So all those `try: head_hf_s3(...)` calls shouldn't be attempted with `DATASETS_OFFLINE=1`. Here is the corresponding issue for `transformers`: https://github.com/huggingface/transformers/issues/10379 Thanks.
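A sketch of the proposed gate, using the env var name from this issue (the variable that eventually ships may be named differently, and the helper below is illustrative):

```python
import os

DATASETS_OFFLINE = os.environ.get("DATASETS_OFFLINE", "0") == "1"

def fetch_or_fail(url: str, cached_path: str = None) -> str:
    # With offline mode on, fail fast with a clear error instead of
    # hanging on a network call the firewall will never answer.
    if cached_path is not None:
        return cached_path
    if DATASETS_OFFLINE:
        raise ConnectionError(f"Offline mode is enabled and {url} is not cached.")
    return url  # placeholder for the real download logic
```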
2021-03-05T05:09:54Z
https://github.com/huggingface/datasets/issues/1939
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1939/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1938/comments
https://api.github.com/repos/huggingface/datasets/issues/1938/timeline
2021-02-25T11:27:29Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc5NDQyNDkw
closed
[]
false
1,938
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Disallow ClassLabel with no names
https://api.github.com/repos/huggingface/datasets/issues/1938/events
null
https://api.github.com/repos/huggingface/datasets/issues/1938/labels{/name}
2021-02-24T16:37:57Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1938.diff", "html_url": "https://github.com/huggingface/datasets/pull/1938", "merged_at": "2021-02-25T11:27:29Z", "patch_url": "https://github.com/huggingface/datasets/pull/1938.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1938" }
815,647,774
[]
https://api.github.com/repos/huggingface/datasets/issues/1938
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
It was possible to create a ClassLabel without specifying the names or the number of classes. This was causing silent issues, as in #1936, and breaking the conversion methods `str2int` and `int2str`. cc @justin-yan
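A quick sketch of the required usage and the conversions this protects:

```python
from datasets import ClassLabel

label = ClassLabel(names=["negative", "positive"])
print(label.str2int("positive"))  # 1
print(label.int2str(0))           # 'negative'

# After this change, ClassLabel() with neither `names` nor `num_classes`
# raises instead of silently producing a feature with broken conversions.
```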
2021-02-25T11:27:29Z
https://github.com/huggingface/datasets/pull/1938
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1938/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1937/comments
https://api.github.com/repos/huggingface/datasets/issues/1937/timeline
2021-02-26T11:10:06Z
null
completed
MDU6SXNzdWU4MTUxNjM5NDM=
closed
[]
null
1,937
{ "avatar_url": "https://avatars.githubusercontent.com/u/10104354?v=4", "events_url": "https://api.github.com/users/yuchenlin/events{/privacy}", "followers_url": "https://api.github.com/users/yuchenlin/followers", "following_url": "https://api.github.com/users/yuchenlin/following{/other_user}", "gists_url": "https://api.github.com/users/yuchenlin/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yuchenlin", "id": 10104354, "login": "yuchenlin", "node_id": "MDQ6VXNlcjEwMTA0MzU0", "organizations_url": "https://api.github.com/users/yuchenlin/orgs", "received_events_url": "https://api.github.com/users/yuchenlin/received_events", "repos_url": "https://api.github.com/users/yuchenlin/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yuchenlin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yuchenlin/subscriptions", "type": "User", "url": "https://api.github.com/users/yuchenlin" }
CommonGen dataset page shows an error OSError: [Errno 28] No space left on device
https://api.github.com/repos/huggingface/datasets/issues/1937/events
null
https://api.github.com/repos/huggingface/datasets/issues/1937/labels{/name}
2021-02-24T06:47:33Z
null
false
null
null
815,163,943
[ { "color": "94203D", "default": false, "description": "", "id": 2107841032, "name": "nlp-viewer", "node_id": "MDU6TGFiZWwyMTA3ODQxMDMy", "url": "https://api.github.com/repos/huggingface/datasets/labels/nlp-viewer" } ]
https://api.github.com/repos/huggingface/datasets/issues/1937
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
The viewer page for the CommonGen dataset, https://huggingface.co/datasets/viewer/?dataset=common_gen, shows the following error: ![image](https://user-images.githubusercontent.com/10104354/108959311-1865e600-7629-11eb-868c-cf4cb27034ea.png)
2021-02-26T11:10:06Z
https://github.com/huggingface/datasets/issues/1937
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1937/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1936/comments
https://api.github.com/repos/huggingface/datasets/issues/1936/timeline
2022-03-09T18:46:22Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc4NjY3NTQ4
closed
[]
false
1,936
{ "avatar_url": "https://avatars.githubusercontent.com/u/7731709?v=4", "events_url": "https://api.github.com/users/justin-yan/events{/privacy}", "followers_url": "https://api.github.com/users/justin-yan/followers", "following_url": "https://api.github.com/users/justin-yan/following{/other_user}", "gists_url": "https://api.github.com/users/justin-yan/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/justin-yan", "id": 7731709, "login": "justin-yan", "node_id": "MDQ6VXNlcjc3MzE3MDk=", "organizations_url": "https://api.github.com/users/justin-yan/orgs", "received_events_url": "https://api.github.com/users/justin-yan/received_events", "repos_url": "https://api.github.com/users/justin-yan/repos", "site_admin": false, "starred_url": "https://api.github.com/users/justin-yan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/justin-yan/subscriptions", "type": "User", "url": "https://api.github.com/users/justin-yan" }
[WIP] Adding Support for Reading Pandas Category
https://api.github.com/repos/huggingface/datasets/issues/1936/events
null
https://api.github.com/repos/huggingface/datasets/issues/1936/labels{/name}
2021-02-23T18:32:54Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1936.diff", "html_url": "https://github.com/huggingface/datasets/pull/1936", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/1936.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1936" }
814,726,512
[]
https://api.github.com/repos/huggingface/datasets/issues/1936
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
@lhoestq - continuing our conversation from https://github.com/huggingface/datasets/issues/1906#issuecomment-784247014 The goal of this PR is to support `Dataset.from_pandas(df)` where the dataframe contains a Category. Just the 4-line change below actually does seem to work: ``` >>> from datasets import Dataset >>> import pandas as pd >>> df = pd.DataFrame(pd.Series(["a", "b", "c", "a"], dtype="category")) >>> ds = Dataset.from_pandas(df) >>> ds.to_pandas() 0 0 a 1 b 2 c 3 a >>> ds.to_pandas().dtypes 0 category dtype: object ``` save_to_disk, etc. all seem to work as well. The main things that are theoretically "incorrect" if we leave this are: ``` >>> ds.features.type StructType(struct<0: int64>) ``` There are a decent number of references to this property in the library, but I can't find anything that seems to actually break as a result of this being int64 vs. dictionary. I think the gist of my question is: a) do we *need* to change the dtype of ClassLabel and have get_nested_type return a pyarrow.DictionaryType instead of int64? and b) do you *want* it to change? The biggest challenge I see to implementing this correctly is that the data will need to be passed in along with the pyarrow schema when instantiating the ClassLabel (I *think* this is unavoidable, since the type itself doesn't contain the actual label values), which could be a fairly intrusive change - e.g. `from_arrow_schema`'s interface would need to change to include optional arrow data? Once we start going down this path of modifying the public interfaces I am admittedly feeling a little bit outside of my comfort zone. Additionally I think `int2str`, `str2int`, and `encode_example` probably won't work - but I can't find any usages of them in the library itself.
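For context, a small check (my own sketch, not part of the diff) of what pyarrow itself does with a pandas Category, which is what `features.type` would have to mirror:

```python
import pandas as pd
import pyarrow as pa

df = pd.DataFrame({"label": pd.Series(["a", "b", "c", "a"], dtype="category")})
table = pa.Table.from_pandas(df)

# pyarrow keeps the category as a dictionary-encoded column rather than int64:
print(table.schema.field("label").type)
# dictionary<values=string, indices=int8, ordered=0>
```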
2022-03-09T18:46:22Z
https://github.com/huggingface/datasets/pull/1936
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1936/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1935/comments
https://api.github.com/repos/huggingface/datasets/issues/1935/timeline
2021-02-24T18:05:09Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc4NTgyMzk1
closed
[]
false
1,935
{ "avatar_url": "https://avatars.githubusercontent.com/u/27137566?v=4", "events_url": "https://api.github.com/users/patil-suraj/events{/privacy}", "followers_url": "https://api.github.com/users/patil-suraj/followers", "following_url": "https://api.github.com/users/patil-suraj/following{/other_user}", "gists_url": "https://api.github.com/users/patil-suraj/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patil-suraj", "id": 27137566, "login": "patil-suraj", "node_id": "MDQ6VXNlcjI3MTM3NTY2", "organizations_url": "https://api.github.com/users/patil-suraj/orgs", "received_events_url": "https://api.github.com/users/patil-suraj/received_events", "repos_url": "https://api.github.com/users/patil-suraj/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patil-suraj/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patil-suraj/subscriptions", "type": "User", "url": "https://api.github.com/users/patil-suraj" }
add CoVoST2
https://api.github.com/repos/huggingface/datasets/issues/1935/events
null
https://api.github.com/repos/huggingface/datasets/issues/1935/labels{/name}
2021-02-23T16:28:16Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1935.diff", "html_url": "https://github.com/huggingface/datasets/pull/1935", "merged_at": "2021-02-24T18:05:09Z", "patch_url": "https://github.com/huggingface/datasets/pull/1935.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1935" }
814,623,827
[]
https://api.github.com/repos/huggingface/datasets/issues/1935
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
This PR adds the CoVoST2 dataset for speech translation and ASR. https://github.com/facebookresearch/covost#covost-2 The dataset requires manual download as the download page requests an email address and the URLs are temporary. The dummy data is a bit bigger because of the mp3 files and 36 configs.
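Usage would presumably look like the sketch below; the config name `"en_de"` and the `data_dir` location are illustrative assumptions, not taken from the PR:

```python
from datasets import load_dataset

# data_dir should point to the manually downloaded CoVoST2 archives;
# "en_de" stands in for one of the 36 language-pair configs.
covost = load_dataset("covost2", "en_de", data_dir="path/to/manual/download")
```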
2021-02-24T18:09:32Z
https://github.com/huggingface/datasets/pull/1935
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1935/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1934/comments
https://api.github.com/repos/huggingface/datasets/issues/1934/timeline
2021-03-18T17:51:44Z
null
completed
MDU6SXNzdWU4MTQ0MzcxOTA=
closed
[]
null
1,934
{ "avatar_url": "https://avatars.githubusercontent.com/u/15801338?v=4", "events_url": "https://api.github.com/users/patpizio/events{/privacy}", "followers_url": "https://api.github.com/users/patpizio/followers", "following_url": "https://api.github.com/users/patpizio/following{/other_user}", "gists_url": "https://api.github.com/users/patpizio/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patpizio", "id": 15801338, "login": "patpizio", "node_id": "MDQ6VXNlcjE1ODAxMzM4", "organizations_url": "https://api.github.com/users/patpizio/orgs", "received_events_url": "https://api.github.com/users/patpizio/received_events", "repos_url": "https://api.github.com/users/patpizio/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patpizio/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patpizio/subscriptions", "type": "User", "url": "https://api.github.com/users/patpizio" }
Add Stanford Sentiment Treebank (SST)
https://api.github.com/repos/huggingface/datasets/issues/1934/events
null
https://api.github.com/repos/huggingface/datasets/issues/1934/labels{/name}
2021-02-23T12:53:16Z
null
false
null
null
814,437,190
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
https://api.github.com/repos/huggingface/datasets/issues/1934
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
I am going to add SST: - **Name:** The Stanford Sentiment Treebank - **Description:** The first corpus with fully labeled parse trees that allows for a complete analysis of the compositional effects of sentiment in language - **Paper:** [Recursive Deep Models for Semantic Compositionality Over a Sentiment Treebank](https://nlp.stanford.edu/~socherr/EMNLP2013_RNTN.pdf) - **Data:** https://nlp.stanford.edu/sentiment/index.html - **Motivation:** Already requested in #353, SST is a popular dataset for Sentiment Classification How does it differ from the [_SST-2_](https://huggingface.co/datasets/viewer/?dataset=glue&config=sst2) dataset included in GLUE? Essentially, SST-2 is a version of SST where: - the labels were mapped from real numbers in [0.0, 1.0] to a binary label: {0, 1} - the labels of the *sub-sentences* were included only in the training set - the labels in the test set are obfuscated So there is a lot more information in the original SST. The tricky bit is that the data is scattered across many text files and, for one in particular, I couldn't find the original encoding ([*but I'm not the only one*](https://groups.google.com/g/word2vec-toolkit/c/QIUjLw6RqFk/m/_iEeyt428wkJ) 🎵). The only solution I found was to manually fix all the è, ë, ç and so on in a `utf-8` copy of the text file. I uploaded the result to my Dropbox and I am using that as the main repo for the dataset. Also, the _sub-sentences_ are built at run-time from the information encoded in several text files, so generating the examples is a bit more cumbersome than usual. Luckily, the dataset is not enormous. I plan to divide the dataset into 2 configs: one with just whole sentences with their labels, the other with sentences _and their sub-sentences_ with their labels. Each config will be split into train, validation, and test. Hopefully this makes sense; we can discuss it in the PR I'm going to submit.
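For readers unfamiliar with the mapping, a rough sketch (my own, with the usual SST bucket cutoffs assumed) of how SST-2 derives its binary labels from SST's real-valued scores:

```python
def sst_to_binary(score: float):
    """Map an SST sentiment score in [0.0, 1.0] to the SST-2 binary label.

    Assumes the standard SST bucketing, where neutral sentences
    (scores in (0.4, 0.6]) are dropped, as in GLUE's SST-2.
    """
    if 0.4 < score <= 0.6:
        return None  # neutral: dropped from the binary task
    return int(score > 0.6)  # 1 = positive, 0 = negative
```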
2021-03-18T17:51:44Z
https://github.com/huggingface/datasets/issues/1934
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1934/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1933/comments
https://api.github.com/repos/huggingface/datasets/issues/1933/timeline
2023-09-25T09:20:38Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc4MzQwMzk3
closed
[]
true
1,933
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Use arrow ipc file format
https://api.github.com/repos/huggingface/datasets/issues/1933/events
null
https://api.github.com/repos/huggingface/datasets/issues/1933/labels{/name}
2021-02-23T10:38:24Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1933.diff", "html_url": "https://github.com/huggingface/datasets/pull/1933", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/1933.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1933" }
814,335,846
[]
https://api.github.com/repos/huggingface/datasets/issues/1933
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
According to the [documentation](https://arrow.apache.org/docs/format/Columnar.html?highlight=arrow1#ipc-file-format), it's identical to the streaming format except that it contains the memory offsets of each sample: > We define a “file format” supporting random access that is built with the stream format. The file starts and ends with a magic string ARROW1 (plus padding). What follows in the file is identical to the stream format. At the end of the file, we write a footer containing a redundant copy of the schema (which is a part of the streaming format) plus memory offsets and sizes for each of the data blocks in the file. This enables random access to any record batch in the file. See File.fbs for the precise details of the file footer. Since it stores more metadata regarding the positions of the examples in the file, it should enable better example retrieval performance. However, from the discussion in https://github.com/huggingface/datasets/issues/1803, it unfortunately looks like that's not the case. Maybe in the future this will allow speed gains. I think it's still a good idea to start using it anyway for these reasons: - in the future we may have speed gains - it contains the arrow streaming format data - it's compatible with the pyarrow Dataset implementation (it allows loading remote dataframes, for example) if we want to use it in the future - it's also the format used by arrow feather if we want to use it in the future - it's roughly the same size as the streaming format - it's easy to have backward compatibility with the streaming format
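A small pyarrow sketch (mine, for illustration) of the difference between the two formats and the random access the file footer enables:

```python
import pyarrow as pa

table = pa.table({"text": ["a", "b", "c"]})

# Streaming format: batches only, no footer, so no random access.
sink = pa.BufferOutputStream()
with pa.ipc.new_stream(sink, table.schema) as writer:
    writer.write_table(table)

# File format: the same batches plus a footer storing per-batch offsets.
sink = pa.BufferOutputStream()
with pa.ipc.new_file(sink, table.schema) as writer:
    writer.write_table(table)

reader = pa.ipc.open_file(sink.getvalue())
print(reader.num_record_batches)  # batch count read from the footer
print(reader.get_batch(0))        # random access by batch index
```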
2023-10-30T16:20:19Z
https://github.com/huggingface/datasets/pull/1933
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1933/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1932/comments
https://api.github.com/repos/huggingface/datasets/issues/1932/timeline
2021-02-23T10:45:27Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc4MzMyMTQy
closed
[]
false
1,932
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Fix builder config creation with data_dir
https://api.github.com/repos/huggingface/datasets/issues/1932/events
null
https://api.github.com/repos/huggingface/datasets/issues/1932/labels{/name}
2021-02-23T10:26:02Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1932.diff", "html_url": "https://github.com/huggingface/datasets/pull/1932", "merged_at": "2021-02-23T10:45:27Z", "patch_url": "https://github.com/huggingface/datasets/pull/1932.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1932" }
814,326,116
[]
https://api.github.com/repos/huggingface/datasets/issues/1932
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
The data_dir parameter wasn't taken into account when creating the config_id, so the resulting builder config was considered non-custom. However, a non-custom builder config must not have a name that collides with the predefined builder config names, so this resulted in a `ValueError("Cannot name a custom BuilderConfig the same as an available...")` I fixed that by commenting out the line that used to ignore the data_dir when creating the config. It was previously ignored, before the introduction of the config id, because we didn't want to change the config name; now it's fine to take it into account for the config id. Creating a config with a data_dir works again. @patrickvonplaten
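In other words, after this fix something like the following (a hypothetical script name and path, for illustration only) should work again instead of raising the `ValueError` above:

```python
from datasets import load_dataset

# Reusing a predefined config name together with a custom data_dir now
# yields a custom config id instead of a name-collision error.
ds = load_dataset("my_dataset_script", "default", data_dir="path/to/my/data")
```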
2021-02-23T10:45:28Z
https://github.com/huggingface/datasets/pull/1932
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1932/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1931/comments
https://api.github.com/repos/huggingface/datasets/issues/1931/timeline
2021-03-01T10:01:03Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc4MjQ4NTA5
closed
[]
false
1,931
{ "avatar_url": "https://avatars.githubusercontent.com/u/13961899?v=4", "events_url": "https://api.github.com/users/pdufter/events{/privacy}", "followers_url": "https://api.github.com/users/pdufter/followers", "following_url": "https://api.github.com/users/pdufter/following{/other_user}", "gists_url": "https://api.github.com/users/pdufter/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/pdufter", "id": 13961899, "login": "pdufter", "node_id": "MDQ6VXNlcjEzOTYxODk5", "organizations_url": "https://api.github.com/users/pdufter/orgs", "received_events_url": "https://api.github.com/users/pdufter/received_events", "repos_url": "https://api.github.com/users/pdufter/repos", "site_admin": false, "starred_url": "https://api.github.com/users/pdufter/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pdufter/subscriptions", "type": "User", "url": "https://api.github.com/users/pdufter" }
add m_lama (multilingual lama) dataset
https://api.github.com/repos/huggingface/datasets/issues/1931/events
null
https://api.github.com/repos/huggingface/datasets/issues/1931/labels{/name}
2021-02-23T08:11:57Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1931.diff", "html_url": "https://github.com/huggingface/datasets/pull/1931", "merged_at": "2021-03-01T10:01:03Z", "patch_url": "https://github.com/huggingface/datasets/pull/1931.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1931" }
814,225,074
[]
https://api.github.com/repos/huggingface/datasets/issues/1931
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Add a multilingual (machine translated and automatically generated) version of the LAMA benchmark. For details see the paper https://arxiv.org/pdf/2102.00894.pdf
2021-03-01T10:01:03Z
https://github.com/huggingface/datasets/pull/1931
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1931/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1930/comments
https://api.github.com/repos/huggingface/datasets/issues/1930/timeline
2021-04-07T15:24:56Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc4MTAwNzI0
closed
[]
false
1,930
{ "avatar_url": "https://avatars.githubusercontent.com/u/22306304?v=4", "events_url": "https://api.github.com/users/JieyuZhao/events{/privacy}", "followers_url": "https://api.github.com/users/JieyuZhao/followers", "following_url": "https://api.github.com/users/JieyuZhao/following{/other_user}", "gists_url": "https://api.github.com/users/JieyuZhao/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/JieyuZhao", "id": 22306304, "login": "JieyuZhao", "node_id": "MDQ6VXNlcjIyMzA2MzA0", "organizations_url": "https://api.github.com/users/JieyuZhao/orgs", "received_events_url": "https://api.github.com/users/JieyuZhao/received_events", "repos_url": "https://api.github.com/users/JieyuZhao/repos", "site_admin": false, "starred_url": "https://api.github.com/users/JieyuZhao/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JieyuZhao/subscriptions", "type": "User", "url": "https://api.github.com/users/JieyuZhao" }
updated the wino_bias dataset
https://api.github.com/repos/huggingface/datasets/issues/1930/events
null
https://api.github.com/repos/huggingface/datasets/issues/1930/labels{/name}
2021-02-23T03:07:40Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1930.diff", "html_url": "https://github.com/huggingface/datasets/pull/1930", "merged_at": "2021-04-07T15:24:56Z", "patch_url": "https://github.com/huggingface/datasets/pull/1930.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1930" }
814,055,198
[]
https://api.github.com/repos/huggingface/datasets/issues/1930
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Updated the wino_bias.py script. - updated the data_url - added different configurations for different data splits - added the coreference_cluster to the data features
2021-04-07T15:24:56Z
https://github.com/huggingface/datasets/pull/1930
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1930/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1929/comments
https://api.github.com/repos/huggingface/datasets/issues/1929/timeline
2021-02-24T14:03:54Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc3OTk1MTE4
closed
[]
false
1,929
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
Improve typing and style and fix some inconsistencies
https://api.github.com/repos/huggingface/datasets/issues/1929/events
null
https://api.github.com/repos/huggingface/datasets/issues/1929/labels{/name}
2021-02-22T22:47:41Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1929.diff", "html_url": "https://github.com/huggingface/datasets/pull/1929", "merged_at": "2021-02-24T14:03:53Z", "patch_url": "https://github.com/huggingface/datasets/pull/1929.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1929" }
813,929,669
[]
https://api.github.com/repos/huggingface/datasets/issues/1929
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
This PR: * improves typing (mostly more consistent use of `typing.Optional`) * `DatasetDict.cleanup_cache_files` now correctly returns a dict * replaces `dict()` with the corresponding literal * uses `dict_to_copy.copy()` instead of `dict(dict_to_copy)` for shallow copying
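A toy illustration (mine, not from the diff) of the idioms this standardizes on:

```python
from typing import Optional

options = {}                   # dict literal instead of dict()
options_copy = options.copy()  # .copy() instead of dict(options)

def cleanup_cache_files(split: Optional[str] = None) -> dict:
    # explicit typing.Optional for arguments defaulting to None
    return {}
```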
2021-02-24T16:16:14Z
https://github.com/huggingface/datasets/pull/1929
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1929/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1928/comments
https://api.github.com/repos/huggingface/datasets/issues/1928/timeline
2021-02-23T18:19:25Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc3ODgyMDM4
closed
[]
false
1,928
{ "avatar_url": "https://avatars.githubusercontent.com/u/26722925?v=4", "events_url": "https://api.github.com/users/mcmillanmajora/events{/privacy}", "followers_url": "https://api.github.com/users/mcmillanmajora/followers", "following_url": "https://api.github.com/users/mcmillanmajora/following{/other_user}", "gists_url": "https://api.github.com/users/mcmillanmajora/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mcmillanmajora", "id": 26722925, "login": "mcmillanmajora", "node_id": "MDQ6VXNlcjI2NzIyOTI1", "organizations_url": "https://api.github.com/users/mcmillanmajora/orgs", "received_events_url": "https://api.github.com/users/mcmillanmajora/received_events", "repos_url": "https://api.github.com/users/mcmillanmajora/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mcmillanmajora/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mcmillanmajora/subscriptions", "type": "User", "url": "https://api.github.com/users/mcmillanmajora" }
Updating old cards
https://api.github.com/repos/huggingface/datasets/issues/1928/events
null
https://api.github.com/repos/huggingface/datasets/issues/1928/labels{/name}
2021-02-22T19:26:04Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1928.diff", "html_url": "https://github.com/huggingface/datasets/pull/1928", "merged_at": "2021-02-23T18:19:25Z", "patch_url": "https://github.com/huggingface/datasets/pull/1928.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1928" }
813,793,434
[]
https://api.github.com/repos/huggingface/datasets/issues/1928
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Updated the cards for [Allocine](https://github.com/mcmillanmajora/datasets/tree/updating-old-cards/datasets/allocine), [CNN/DailyMail](https://github.com/mcmillanmajora/datasets/tree/updating-old-cards/datasets/cnn_dailymail), and [SNLI](https://github.com/mcmillanmajora/datasets/tree/updating-old-cards/datasets/snli). For the most part, the information was just rearranged or rephrased, but the social impact statements are new.
2021-02-23T18:19:25Z
https://github.com/huggingface/datasets/pull/1928
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1928/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1927/comments
https://api.github.com/repos/huggingface/datasets/issues/1927/timeline
2022-09-23T13:35:08Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc3ODYxODM5
closed
[]
false
1,927
{ "avatar_url": "https://avatars.githubusercontent.com/u/22306304?v=4", "events_url": "https://api.github.com/users/JieyuZhao/events{/privacy}", "followers_url": "https://api.github.com/users/JieyuZhao/followers", "following_url": "https://api.github.com/users/JieyuZhao/following{/other_user}", "gists_url": "https://api.github.com/users/JieyuZhao/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/JieyuZhao", "id": 22306304, "login": "JieyuZhao", "node_id": "MDQ6VXNlcjIyMzA2MzA0", "organizations_url": "https://api.github.com/users/JieyuZhao/orgs", "received_events_url": "https://api.github.com/users/JieyuZhao/received_events", "repos_url": "https://api.github.com/users/JieyuZhao/repos", "site_admin": false, "starred_url": "https://api.github.com/users/JieyuZhao/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JieyuZhao/subscriptions", "type": "User", "url": "https://api.github.com/users/JieyuZhao" }
Update dataset card of wino_bias
https://api.github.com/repos/huggingface/datasets/issues/1927/events
null
https://api.github.com/repos/huggingface/datasets/issues/1927/labels{/name}
2021-02-22T18:51:34Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1927.diff", "html_url": "https://github.com/huggingface/datasets/pull/1927", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/1927.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1927" }
813,768,935
[ { "color": "0e8a16", "default": false, "description": "Contribution to a dataset script", "id": 4564477500, "name": "dataset contribution", "node_id": "LA_kwDODunzps8AAAABEBBmPA", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20contribution" } ]
https://api.github.com/repos/huggingface/datasets/issues/1927
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Updated the info for the wino_bias dataset.
2022-09-23T13:35:09Z
https://github.com/huggingface/datasets/pull/1927
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1927/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1926/comments
https://api.github.com/repos/huggingface/datasets/issues/1926/timeline
2021-02-22T15:49:53Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc3NzI4Mjgy
closed
[]
false
1,926
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Fix: Wiki_dpr - add missing scalar quantizer
https://api.github.com/repos/huggingface/datasets/issues/1926/events
null
https://api.github.com/repos/huggingface/datasets/issues/1926/labels{/name}
2021-02-22T15:32:05Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1926.diff", "html_url": "https://github.com/huggingface/datasets/pull/1926", "merged_at": "2021-02-22T15:49:53Z", "patch_url": "https://github.com/huggingface/datasets/pull/1926.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1926" }
813,607,994
[]
https://api.github.com/repos/huggingface/datasets/issues/1926
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
All the prebuilt wiki_dpr indexes already use SQ8; I forgot to update the wiki_dpr script after building them. Now it's finally done. The SQ8 scalar quantizer doesn't reduce the retrieval performance of the index, as shown in experiments on RAG. The quantizer greatly reduces the size of the index but increases index building time.
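For reference, a standalone faiss sketch of what an SQ8-compressed index looks like; the factory string is illustrative and not necessarily the one used for the prebuilt wiki_dpr indexes:

```python
import faiss
import numpy as np

d = 768  # DPR embeddings are 768-dimensional
# IVF index whose stored codes are compressed with the SQ8 scalar quantizer
# (roughly 1 byte per vector component instead of 4 for float32):
index = faiss.index_factory(d, "IVF256,SQ8")

xb = np.random.rand(10_000, d).astype("float32")
index.train(xb)  # training is where the quantizer adds extra build time
index.add(xb)
```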
2021-02-22T15:49:54Z
https://github.com/huggingface/datasets/pull/1926
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1926/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1925/comments
https://api.github.com/repos/huggingface/datasets/issues/1925/timeline
2021-02-22T15:36:08Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc3NzIyMzc3
closed
[]
false
1,925
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Fix: Wiki_dpr - fix when with_embeddings is False or index_name is "no_index"
https://api.github.com/repos/huggingface/datasets/issues/1925/events
null
https://api.github.com/repos/huggingface/datasets/issues/1925/labels{/name}
2021-02-22T15:23:46Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1925.diff", "html_url": "https://github.com/huggingface/datasets/pull/1925", "merged_at": "2021-02-22T15:36:07Z", "patch_url": "https://github.com/huggingface/datasets/pull/1925.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1925" }
813,600,902
[]
https://api.github.com/repos/huggingface/datasets/issues/1925
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
Fix the bugs noticed in #1915 There was a bug when `with_embeddings=False` where the configuration name was the same as if `with_embeddings=True`, which led the dataset builder to run incorrect verifications (for example, it used to expect to download the embeddings even though `with_embeddings=False`). Another issue was that setting `index_name="no_index"` didn't set `with_index` to False. I fixed both and added dummy data for those configurations for testing.
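With the fix, both configurations from the issue should load cleanly, e.g. (a sketch based on the calls reported in #1915):

```python
from datasets import load_dataset

# No longer expects to download the embeddings it doesn't need:
ds = load_dataset("wiki_dpr", with_embeddings=False, index_name="no_index")

# index_name="no_index" now also implies with_index=False:
ds = load_dataset("wiki_dpr", embeddings_name="multiset", index_name="no_index")
```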
2021-02-25T01:33:48Z
https://github.com/huggingface/datasets/pull/1925
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1925/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1924/comments
https://api.github.com/repos/huggingface/datasets/issues/1924/timeline
2022-10-05T13:07:11Z
null
completed
MDU6SXNzdWU4MTM1OTk3MzM=
closed
[]
null
1,924
{ "avatar_url": "https://avatars.githubusercontent.com/u/22492839?v=4", "events_url": "https://api.github.com/users/PierreColombo/events{/privacy}", "followers_url": "https://api.github.com/users/PierreColombo/followers", "following_url": "https://api.github.com/users/PierreColombo/following{/other_user}", "gists_url": "https://api.github.com/users/PierreColombo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/PierreColombo", "id": 22492839, "login": "PierreColombo", "node_id": "MDQ6VXNlcjIyNDkyODM5", "organizations_url": "https://api.github.com/users/PierreColombo/orgs", "received_events_url": "https://api.github.com/users/PierreColombo/received_events", "repos_url": "https://api.github.com/users/PierreColombo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/PierreColombo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/PierreColombo/subscriptions", "type": "User", "url": "https://api.github.com/users/PierreColombo" }
Anonymous Dataset Addition (i.e Anonymous PR?)
https://api.github.com/repos/huggingface/datasets/issues/1924/events
null
https://api.github.com/repos/huggingface/datasets/issues/1924/labels{/name}
2021-02-22T15:22:30Z
null
false
null
null
813,599,733
[]
https://api.github.com/repos/huggingface/datasets/issues/1924
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Hello, Thanks a lot for your librairy. We plan to submit a paper on OpenReview using the Anonymous setting. Is it possible to add a new dataset without breaking the anonimity, with a link to the paper ? Cheers @eusip
2022-10-05T13:07:11Z
https://github.com/huggingface/datasets/issues/1924
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1924/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1923/comments
https://api.github.com/repos/huggingface/datasets/issues/1923/timeline
2021-02-22T11:22:43Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc3NTI0MTU0
closed
[]
false
1,923
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Fix save_to_disk with relative path
https://api.github.com/repos/huggingface/datasets/issues/1923/events
null
https://api.github.com/repos/huggingface/datasets/issues/1923/labels{/name}
2021-02-22T10:27:19Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1923.diff", "html_url": "https://github.com/huggingface/datasets/pull/1923", "merged_at": "2021-02-22T11:22:43Z", "patch_url": "https://github.com/huggingface/datasets/pull/1923.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1923" }
813,363,472
[]
https://api.github.com/repos/huggingface/datasets/issues/1923
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
As noticed in #1919 and #1920, the target directory was not created using `makedirs`, so saving to it raised `FileNotFoundError`. For absolute paths it worked, but not for the right reason: the target path was the same as the temporary path where in-memory data are written as an intermediate step. I added the `makedirs` call using `fs.makedirs` in order to support remote filesystems, and I fixed the issue with the target path being the temporary path. I also added a test case for save_to_disk with relative paths. Thanks to @M-Salti for reporting and investigating
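The gist of the fix, as a rough sketch rather than the actual patch:

```python
import fsspec

fs = fsspec.filesystem("file")  # or any remote fsspec filesystem (s3, gcs, ...)

# Create the target directory (local or remote) before writing, instead of
# assuming it exists or accidentally reusing the temporary in-memory path:
fs.makedirs("path/to/dataset/train", exist_ok=True)
```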
2021-02-22T11:22:44Z
https://github.com/huggingface/datasets/pull/1923
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1923/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1922/comments
https://api.github.com/repos/huggingface/datasets/issues/1922/timeline
null
null
null
MDU6SXNzdWU4MTMxNDA4MDY=
open
[]
null
1,922
{ "avatar_url": "https://avatars.githubusercontent.com/u/22306304?v=4", "events_url": "https://api.github.com/users/JieyuZhao/events{/privacy}", "followers_url": "https://api.github.com/users/JieyuZhao/followers", "following_url": "https://api.github.com/users/JieyuZhao/following{/other_user}", "gists_url": "https://api.github.com/users/JieyuZhao/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/JieyuZhao", "id": 22306304, "login": "JieyuZhao", "node_id": "MDQ6VXNlcjIyMzA2MzA0", "organizations_url": "https://api.github.com/users/JieyuZhao/orgs", "received_events_url": "https://api.github.com/users/JieyuZhao/received_events", "repos_url": "https://api.github.com/users/JieyuZhao/repos", "site_admin": false, "starred_url": "https://api.github.com/users/JieyuZhao/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JieyuZhao/subscriptions", "type": "User", "url": "https://api.github.com/users/JieyuZhao" }
How to update the "wino_bias" dataset
https://api.github.com/repos/huggingface/datasets/issues/1922/events
null
https://api.github.com/repos/huggingface/datasets/issues/1922/labels{/name}
2021-02-22T05:39:39Z
null
false
null
null
813,140,806
[]
https://api.github.com/repos/huggingface/datasets/issues/1922
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Hi all, Thanks for the efforts to collect all the datasets! But I think there is a problem with the wino_bias dataset. The current link is not correct. How can I update that? Thanks!
2021-02-22T10:35:59Z
https://github.com/huggingface/datasets/issues/1922
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1922/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1921/comments
https://api.github.com/repos/huggingface/datasets/issues/1921/timeline
2021-02-22T09:44:10Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc3MDEzMDM4
closed
[]
false
1,921
{ "avatar_url": "https://avatars.githubusercontent.com/u/7731709?v=4", "events_url": "https://api.github.com/users/justin-yan/events{/privacy}", "followers_url": "https://api.github.com/users/justin-yan/followers", "following_url": "https://api.github.com/users/justin-yan/following{/other_user}", "gists_url": "https://api.github.com/users/justin-yan/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/justin-yan", "id": 7731709, "login": "justin-yan", "node_id": "MDQ6VXNlcjc3MzE3MDk=", "organizations_url": "https://api.github.com/users/justin-yan/orgs", "received_events_url": "https://api.github.com/users/justin-yan/received_events", "repos_url": "https://api.github.com/users/justin-yan/repos", "site_admin": false, "starred_url": "https://api.github.com/users/justin-yan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/justin-yan/subscriptions", "type": "User", "url": "https://api.github.com/users/justin-yan" }
Standardizing datasets dtypes
https://api.github.com/repos/huggingface/datasets/issues/1921/events
null
https://api.github.com/repos/huggingface/datasets/issues/1921/labels{/name}
2021-02-20T22:04:01Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1921.diff", "html_url": "https://github.com/huggingface/datasets/pull/1921", "merged_at": "2021-02-22T09:44:10Z", "patch_url": "https://github.com/huggingface/datasets/pull/1921.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1921" }
812,716,042
[]
https://api.github.com/repos/huggingface/datasets/issues/1921
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
This PR follows up on the discussion in #1900 to have an explicit set of basic dtypes for datasets. It moves away from str(pyarrow.DataType) as the method of choice for creating dtypes, favoring an explicit mapping to a list of supported Value dtypes. In practice this should be backward compatible, since anyone previously using Value() could only have used dtypes with an identically named pyarrow factory function, all of which are explicitly supported here. `float32` and `float64` act as the official datasets dtypes, which resolves the tension between `double` being the pyarrow dtype name and `float64` being the pyarrow type factory function.
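Concretely (my example, not from the diff), the two naming schemes now meet in one explicit entry:

```python
from datasets import Features, Value

features = Features({"score": Value("float64"), "weight": Value("float32")})

# pyarrow prints float64 as "double", but "float64" is the official
# datasets dtype string for it:
print(features["score"].pa_type)  # double
```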
2021-02-22T09:44:10Z
https://github.com/huggingface/datasets/pull/1921
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1921/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1920/comments
https://api.github.com/repos/huggingface/datasets/issues/1920/timeline
2021-02-22T10:30:11Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc2OTQ5NzI2
closed
[]
false
1,920
{ "avatar_url": "https://avatars.githubusercontent.com/u/9285264?v=4", "events_url": "https://api.github.com/users/M-Salti/events{/privacy}", "followers_url": "https://api.github.com/users/M-Salti/followers", "following_url": "https://api.github.com/users/M-Salti/following{/other_user}", "gists_url": "https://api.github.com/users/M-Salti/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/M-Salti", "id": 9285264, "login": "M-Salti", "node_id": "MDQ6VXNlcjkyODUyNjQ=", "organizations_url": "https://api.github.com/users/M-Salti/orgs", "received_events_url": "https://api.github.com/users/M-Salti/received_events", "repos_url": "https://api.github.com/users/M-Salti/repos", "site_admin": false, "starred_url": "https://api.github.com/users/M-Salti/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/M-Salti/subscriptions", "type": "User", "url": "https://api.github.com/users/M-Salti" }
Fix save_to_disk issue
https://api.github.com/repos/huggingface/datasets/issues/1920/events
null
https://api.github.com/repos/huggingface/datasets/issues/1920/labels{/name}
2021-02-20T14:22:39Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1920.diff", "html_url": "https://github.com/huggingface/datasets/pull/1920", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/1920.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1920" }
812,628,220
[]
https://api.github.com/repos/huggingface/datasets/issues/1920
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
Fixes #1919
2021-02-22T10:30:11Z
https://github.com/huggingface/datasets/pull/1920
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1920/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1919/comments
https://api.github.com/repos/huggingface/datasets/issues/1919/timeline
2021-03-03T17:40:27Z
null
completed
MDU6SXNzdWU4MTI2MjY4NzI=
closed
[]
null
1,919
{ "avatar_url": "https://avatars.githubusercontent.com/u/9285264?v=4", "events_url": "https://api.github.com/users/M-Salti/events{/privacy}", "followers_url": "https://api.github.com/users/M-Salti/followers", "following_url": "https://api.github.com/users/M-Salti/following{/other_user}", "gists_url": "https://api.github.com/users/M-Salti/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/M-Salti", "id": 9285264, "login": "M-Salti", "node_id": "MDQ6VXNlcjkyODUyNjQ=", "organizations_url": "https://api.github.com/users/M-Salti/orgs", "received_events_url": "https://api.github.com/users/M-Salti/received_events", "repos_url": "https://api.github.com/users/M-Salti/repos", "site_admin": false, "starred_url": "https://api.github.com/users/M-Salti/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/M-Salti/subscriptions", "type": "User", "url": "https://api.github.com/users/M-Salti" }
Failure to save with save_to_disk
https://api.github.com/repos/huggingface/datasets/issues/1919/events
null
https://api.github.com/repos/huggingface/datasets/issues/1919/labels{/name}
2021-02-20T14:18:10Z
null
false
null
null
812,626,872
[]
https://api.github.com/repos/huggingface/datasets/issues/1919
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
When I try to save a dataset locally using the `save_to_disk` method, I get the error: ```bash FileNotFoundError: [Errno 2] No such file or directory: '/content/squad/train/squad-train.arrow' ``` To replicate: 1. Install `datasets` from master 2. Run this code: ```python from datasets import load_dataset squad = load_dataset("squad") # or any other dataset squad.save_to_disk("squad") # error here ``` The problem is that the method is not creating a directory with the name `dataset_path` for saving the dataset in (i.e. it's not creating the *train* and *validation* directories in this case). After creating the directory, the problem is resolved. I'll open a PR soon that does this and links this issue.
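Until the fix lands, a workaround (my own suggestion, based on the missing-directory diagnosis above) is to create the per-split directories first:

```python
import os
from datasets import load_dataset

squad = load_dataset("squad")
for split in squad:  # "train", "validation"
    os.makedirs(os.path.join("squad", split), exist_ok=True)
squad.save_to_disk("squad")
```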
2021-03-03T17:40:27Z
https://github.com/huggingface/datasets/issues/1919
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1919/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1918/comments
https://api.github.com/repos/huggingface/datasets/issues/1918/timeline
2021-02-22T13:35:06Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc2ODg2OTQ0
closed
[]
false
1,918
{ "avatar_url": "https://avatars.githubusercontent.com/u/9285264?v=4", "events_url": "https://api.github.com/users/M-Salti/events{/privacy}", "followers_url": "https://api.github.com/users/M-Salti/followers", "following_url": "https://api.github.com/users/M-Salti/following{/other_user}", "gists_url": "https://api.github.com/users/M-Salti/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/M-Salti", "id": 9285264, "login": "M-Salti", "node_id": "MDQ6VXNlcjkyODUyNjQ=", "organizations_url": "https://api.github.com/users/M-Salti/orgs", "received_events_url": "https://api.github.com/users/M-Salti/received_events", "repos_url": "https://api.github.com/users/M-Salti/repos", "site_admin": false, "starred_url": "https://api.github.com/users/M-Salti/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/M-Salti/subscriptions", "type": "User", "url": "https://api.github.com/users/M-Salti" }
Fix QA4MRE download URLs
https://api.github.com/repos/huggingface/datasets/issues/1918/events
null
https://api.github.com/repos/huggingface/datasets/issues/1918/labels{/name}
2021-02-20T07:32:17Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1918.diff", "html_url": "https://github.com/huggingface/datasets/pull/1918", "merged_at": "2021-02-22T13:35:06Z", "patch_url": "https://github.com/huggingface/datasets/pull/1918.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1918" }
812,541,510
[]
https://api.github.com/repos/huggingface/datasets/issues/1918
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
CONTRIBUTOR
The URLs in the `dataset_infos` and `README` are correct, only the ones in the download script needed updating.
2021-02-22T13:35:06Z
https://github.com/huggingface/datasets/pull/1918
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1918/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1917/comments
https://api.github.com/repos/huggingface/datasets/issues/1917/timeline
2021-02-19T22:40:28Z
null
completed
MDU6SXNzdWU4MTIzOTAxNzg=
closed
[]
null
1,917
{ "avatar_url": "https://avatars.githubusercontent.com/u/900951?v=4", "events_url": "https://api.github.com/users/yosiasz/events{/privacy}", "followers_url": "https://api.github.com/users/yosiasz/followers", "following_url": "https://api.github.com/users/yosiasz/following{/other_user}", "gists_url": "https://api.github.com/users/yosiasz/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yosiasz", "id": 900951, "login": "yosiasz", "node_id": "MDQ6VXNlcjkwMDk1MQ==", "organizations_url": "https://api.github.com/users/yosiasz/orgs", "received_events_url": "https://api.github.com/users/yosiasz/received_events", "repos_url": "https://api.github.com/users/yosiasz/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yosiasz/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yosiasz/subscriptions", "type": "User", "url": "https://api.github.com/users/yosiasz" }
UnicodeDecodeError: windows 10 machine
https://api.github.com/repos/huggingface/datasets/issues/1917/events
null
https://api.github.com/repos/huggingface/datasets/issues/1917/labels{/name}
2021-02-19T22:13:05Z
null
false
null
null
812,390,178
[]
https://api.github.com/repos/huggingface/datasets/issues/1917
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
Windows 10, Python 3.6.8. When running ``` import datasets oscar_am = datasets.load_dataset("oscar", "unshuffled_deduplicated_am") print(oscar_am["train"][0]) ``` I get the following error ``` file "C:\PYTHON\3.6.8\lib\encodings\cp1252.py", line 23, in decode return codecs.charmap_decode(input,self.errors,decoding_table)[0] UnicodeDecodeError: 'charmap' codec can't decode byte 0x9d in position 58: character maps to <undefined> ```
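One likely culprit and workaround (an assumption on my part, not a confirmed fix): the file is UTF-8 but Windows falls back to the cp1252 locale encoding, so forcing UTF-8 mode may help. Note that `PYTHONUTF8` requires Python 3.7+, so upgrading from 3.6.8 would be needed first:

```python
# In cmd, before starting Python (requires Python 3.7+):
#   set PYTHONUTF8=1
# In PowerShell:
#   $env:PYTHONUTF8 = "1"
import datasets

oscar_am = datasets.load_dataset("oscar", "unshuffled_deduplicated_am")
print(oscar_am["train"][0])
```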
2021-02-19T22:41:11Z
https://github.com/huggingface/datasets/issues/1917
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1917/reactions" }
false
https://api.github.com/repos/huggingface/datasets/issues/1916/comments
https://api.github.com/repos/huggingface/datasets/issues/1916/timeline
2021-02-22T13:32:49Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc2NjgwNjY5
closed
[]
false
1,916
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
Remove unused py_utils objects
https://api.github.com/repos/huggingface/datasets/issues/1916/events
null
https://api.github.com/repos/huggingface/datasets/issues/1916/labels{/name}
2021-02-19T19:51:25Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1916.diff", "html_url": "https://github.com/huggingface/datasets/pull/1916", "merged_at": "2021-02-22T13:32:49Z", "patch_url": "https://github.com/huggingface/datasets/pull/1916.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1916" }
812,291,984
[]
https://api.github.com/repos/huggingface/datasets/issues/1916
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
Remove unused/unnecessary py_utils functions/classes.
2021-02-22T14:56:56Z
https://github.com/huggingface/datasets/pull/1916
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1916/reactions" }
true
https://api.github.com/repos/huggingface/datasets/issues/1915/comments
https://api.github.com/repos/huggingface/datasets/issues/1915/timeline
2021-03-03T17:40:48Z
null
completed
MDU6SXNzdWU4MTIyMjk2NTQ=
closed
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" } ]
null
1,915
{ "avatar_url": "https://avatars.githubusercontent.com/u/18504534?v=4", "events_url": "https://api.github.com/users/nitarakad/events{/privacy}", "followers_url": "https://api.github.com/users/nitarakad/followers", "following_url": "https://api.github.com/users/nitarakad/following{/other_user}", "gists_url": "https://api.github.com/users/nitarakad/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/nitarakad", "id": 18504534, "login": "nitarakad", "node_id": "MDQ6VXNlcjE4NTA0NTM0", "organizations_url": "https://api.github.com/users/nitarakad/orgs", "received_events_url": "https://api.github.com/users/nitarakad/received_events", "repos_url": "https://api.github.com/users/nitarakad/repos", "site_admin": false, "starred_url": "https://api.github.com/users/nitarakad/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nitarakad/subscriptions", "type": "User", "url": "https://api.github.com/users/nitarakad" }
Unable to download `wiki_dpr`
https://api.github.com/repos/huggingface/datasets/issues/1915/events
null
https://api.github.com/repos/huggingface/datasets/issues/1915/labels{/name}
2021-02-19T18:11:32Z
null
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
null
812,229,654
[]
https://api.github.com/repos/huggingface/datasets/issues/1915
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
NONE
I am trying to download the `wiki_dpr` dataset. Specifically, I want to download `psgs_w100.multiset.no_index`, i.e. with no embeddings and no index. In order to do so, I ran: `curr_dataset = load_dataset("wiki_dpr", embeddings_name="multiset", index_name="no_index")` However, I got the following error: `datasets.utils.info_utils.UnexpectedDownloadedFile: {'embeddings_index'}` I then tried adding the flags `with_embeddings=False` and `with_index=False`: `curr_dataset = load_dataset("wiki_dpr", with_embeddings=False, with_index=False, embeddings_name="multiset", index_name="no_index")` But I got the following error: `raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums))) datasets.utils.info_utils.ExpectedMoreDownloadedFiles: {'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_5', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_15', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_30', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_36', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_18', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_41', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_13', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_48', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_10', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_23', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_14', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_34', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_43', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_40', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_47', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_3', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_24', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_7', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_33', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_46', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_42', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_27', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_29', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_26', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_22', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_4', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_20', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_39', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_6', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_16', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_8', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_35', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_49', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_17', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_25', 
'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_0', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_38', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_12', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_44', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_1', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_32', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_19', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_31', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_37', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_9', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_11', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_21', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_28', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_45', 'https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_2'}` Is there anything else I need to set to download the dataset? **UPDATE**: just running `curr_dataset = load_dataset("wiki_dpr", with_embeddings=False, with_index=False)` gives me the same error.
2021-03-03T17:40:48Z
https://github.com/huggingface/datasets/issues/1915
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1915/reactions" }
false
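A hedged workaround sketch for the record above, not a confirmed fix: both tracebacks are raised by the post-download checksum/expected-files verification step rather than by the download itself, so skipping verification may let the build proceed. `ignore_verifications` is the `load_dataset` flag for this in the library versions contemporary with these records; whether it fully resolves this particular case is an assumption.

```python
from datasets import load_dataset

# Assumption: the failure comes from metadata verification, not the download,
# so skipping the checksum/expected-files checks may allow the build to finish.
curr_dataset = load_dataset(
    "wiki_dpr",
    with_embeddings=False,
    with_index=False,
    ignore_verifications=True,  # bypass UnexpectedDownloadedFile / ExpectedMoreDownloadedFiles
)
```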
https://api.github.com/repos/huggingface/datasets/issues/1914/comments
https://api.github.com/repos/huggingface/datasets/issues/1914/timeline
2021-02-21T19:48:03Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc2NTYyNTkz
closed
[]
false
1,914
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
Fix logging imports and make all datasets use library logger
https://api.github.com/repos/huggingface/datasets/issues/1914/events
null
https://api.github.com/repos/huggingface/datasets/issues/1914/labels{/name}
2021-02-19T16:12:34Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1914.diff", "html_url": "https://github.com/huggingface/datasets/pull/1914", "merged_at": "2021-02-21T19:48:03Z", "patch_url": "https://github.com/huggingface/datasets/pull/1914.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1914" }
812,149,201
[]
https://api.github.com/repos/huggingface/datasets/issues/1914
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
Fix the library's relative logging imports and make all datasets use the library logger.
2021-02-21T19:48:03Z
https://github.com/huggingface/datasets/pull/1914
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1914/reactions" }
true
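For reference, the pattern this PR describes — dataset scripts obtaining a child of the library logger instead of calling the stdlib `logging` module directly — presumably looks like the sketch below. `get_logger` is the helper exposed by `datasets.utils.logging`; the exact import path used inside each dataset script may differ.

```python
# Minimal sketch of the library-logger pattern, assuming the helper in
# datasets.utils.logging.
from datasets.utils import logging

logger = logging.get_logger(__name__)  # child of the root "datasets" logger

logger.info("generating examples from an archive")  # honors the library's verbosity setting
```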
https://api.github.com/repos/huggingface/datasets/issues/1913/comments
https://api.github.com/repos/huggingface/datasets/issues/1913/timeline
2021-02-19T18:36:11Z
null
null
MDExOlB1bGxSZXF1ZXN0NTc2NTQ0NjQw
closed
[]
false
1,913
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
Add keep_linebreaks parameter to text loader
https://api.github.com/repos/huggingface/datasets/issues/1913/events
null
https://api.github.com/repos/huggingface/datasets/issues/1913/labels{/name}
2021-02-19T15:43:45Z
null
false
null
{ "diff_url": "https://github.com/huggingface/datasets/pull/1913.diff", "html_url": "https://github.com/huggingface/datasets/pull/1913", "merged_at": "2021-02-19T18:36:11Z", "patch_url": "https://github.com/huggingface/datasets/pull/1913.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1913" }
812,127,307
[]
https://api.github.com/repos/huggingface/datasets/issues/1913
[ "", "" ]
https://api.github.com/repos/huggingface/datasets
MEMBER
As asked in #870 and https://github.com/huggingface/transformers/issues/10269, there should be a parameter to keep the linebreaks when loading a text dataset. cc @sgugger @jncasey
2021-02-19T18:36:12Z
https://github.com/huggingface/datasets/pull/1913
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/1913/reactions" }
true
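A minimal usage sketch for the parameter this PR adds, assuming it is exposed as a keyword argument on the `text` loader as the title suggests:

```python
from datasets import load_dataset

# Assumption: keep_linebreaks=True preserves the trailing "\n" on each line
# instead of stripping it (the loader's default behavior).
dataset = load_dataset(
    "text",
    data_files={"train": "train.txt"},
    keep_linebreaks=True,
)
print(dataset["train"][0]["text"])  # line content including its newline
```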