url
stringlengths
58
61
repository_url
stringclasses
1 value
labels_url
stringlengths
72
75
comments_url
stringlengths
67
70
events_url
stringlengths
65
68
html_url
stringlengths
46
51
id
int64
599M
1.62B
node_id
stringlengths
18
32
number
int64
1
5.62k
title
stringlengths
1
290
user
dict
labels
list
state
stringclasses
1 value
locked
bool
1 class
assignee
dict
assignees
list
milestone
dict
comments
sequence
created_at
timestamp[s]
updated_at
timestamp[s]
closed_at
timestamp[s]
author_association
stringclasses
3 values
active_lock_reason
null
body
stringlengths
0
228k
reactions
dict
timeline_url
stringlengths
67
70
performed_via_github_app
null
state_reason
stringclasses
2 values
draft
bool
2 classes
pull_request
dict
is_pull_request
bool
2 classes
https://api.github.com/repos/huggingface/datasets/issues/2296
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2296/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2296/comments
https://api.github.com/repos/huggingface/datasets/issues/2296/events
https://github.com/huggingface/datasets/issues/2296
872,974,907
MDU6SXNzdWU4NzI5NzQ5MDc=
2,296
1
{ "login": "zinnyi", "id": 82880142, "node_id": "MDQ6VXNlcjgyODgwMTQy", "avatar_url": "https://avatars.githubusercontent.com/u/82880142?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zinnyi", "html_url": "https://github.com/zinnyi", "followers_url": "https://api.github.com/users/zinnyi/followers", "following_url": "https://api.github.com/users/zinnyi/following{/other_user}", "gists_url": "https://api.github.com/users/zinnyi/gists{/gist_id}", "starred_url": "https://api.github.com/users/zinnyi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zinnyi/subscriptions", "organizations_url": "https://api.github.com/users/zinnyi/orgs", "repos_url": "https://api.github.com/users/zinnyi/repos", "events_url": "https://api.github.com/users/zinnyi/events{/privacy}", "received_events_url": "https://api.github.com/users/zinnyi/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" } ]
closed
false
null
[]
null
[]
2021-04-30T17:53:49
2021-05-03T08:17:31
2021-05-03T08:17:31
NONE
null
## Adding a Dataset - **Name:** *name of the dataset* - **Description:** *short description of the dataset (or link to social media or blog post)* - **Paper:** *link to the dataset paper if available* - **Data:** *link to the Github repository or current dataset location* - **Motivation:** *what are some good reasons to have this dataset* Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2296/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2296/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2295
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2295/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2295/comments
https://api.github.com/repos/huggingface/datasets/issues/2295/events
https://github.com/huggingface/datasets/pull/2295
872,902,867
MDExOlB1bGxSZXF1ZXN0NjI3NzY0NDk3
2,295
Create ExtractManager
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 2851292821, "node_id": "MDU6TGFiZWwyODUxMjkyODIx", "url": "https://api.github.com/repos/huggingface/datasets/labels/refactoring", "name": "refactoring", "color": "B67A40", "default": false, "description": "Restructuring existing code without changing its external behavior" } ]
closed
false
null
[]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/6", "html_url": "https://github.com/huggingface/datasets/milestone/6", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels", "id": 6836458, "node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==", "number": 6, "title": "1.10", "description": "Next minor release", "creator": { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, "open_issues": 0, "closed_issues": 29, "state": "closed", "created_at": "2021-06-08T18:48:33", "updated_at": "2021-07-21T15:36:49", "due_on": "2021-08-05T07:00:00", "closed_at": "2021-07-21T15:36:49" }
[]
2021-04-30T17:13:34
2021-07-12T14:12:03
2021-07-08T08:11:49
MEMBER
null
Perform refactoring to decouple extract functionality.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2295/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2295/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2295", "html_url": "https://github.com/huggingface/datasets/pull/2295", "diff_url": "https://github.com/huggingface/datasets/pull/2295.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2295.patch", "merged_at": "2021-07-08T08:11:49" }
true
https://api.github.com/repos/huggingface/datasets/issues/2293
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2293/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2293/comments
https://api.github.com/repos/huggingface/datasets/issues/2293/events
https://github.com/huggingface/datasets/pull/2293
872,079,385
MDExOlB1bGxSZXF1ZXN0NjI3MDQzNzQ3
2,293
imdb dataset from Don't Stop Pretraining Paper
{ "login": "BobbyManion", "id": 52530809, "node_id": "MDQ6VXNlcjUyNTMwODA5", "avatar_url": "https://avatars.githubusercontent.com/u/52530809?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BobbyManion", "html_url": "https://github.com/BobbyManion", "followers_url": "https://api.github.com/users/BobbyManion/followers", "following_url": "https://api.github.com/users/BobbyManion/following{/other_user}", "gists_url": "https://api.github.com/users/BobbyManion/gists{/gist_id}", "starred_url": "https://api.github.com/users/BobbyManion/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BobbyManion/subscriptions", "organizations_url": "https://api.github.com/users/BobbyManion/orgs", "repos_url": "https://api.github.com/users/BobbyManion/repos", "events_url": "https://api.github.com/users/BobbyManion/events{/privacy}", "received_events_url": "https://api.github.com/users/BobbyManion/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-30T06:40:48
2021-04-30T06:54:25
2021-04-30T06:54:25
NONE
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2293/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2293/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2293", "html_url": "https://github.com/huggingface/datasets/pull/2293", "diff_url": "https://github.com/huggingface/datasets/pull/2293.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2293.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/2292
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2292/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2292/comments
https://api.github.com/repos/huggingface/datasets/issues/2292/events
https://github.com/huggingface/datasets/pull/2292
871,230,183
MDExOlB1bGxSZXF1ZXN0NjI2MjgzNTYy
2,292
Fixed typo seperate->separate
{ "login": "laksh9950", "id": 32505743, "node_id": "MDQ6VXNlcjMyNTA1NzQz", "avatar_url": "https://avatars.githubusercontent.com/u/32505743?v=4", "gravatar_id": "", "url": "https://api.github.com/users/laksh9950", "html_url": "https://github.com/laksh9950", "followers_url": "https://api.github.com/users/laksh9950/followers", "following_url": "https://api.github.com/users/laksh9950/following{/other_user}", "gists_url": "https://api.github.com/users/laksh9950/gists{/gist_id}", "starred_url": "https://api.github.com/users/laksh9950/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/laksh9950/subscriptions", "organizations_url": "https://api.github.com/users/laksh9950/orgs", "repos_url": "https://api.github.com/users/laksh9950/repos", "events_url": "https://api.github.com/users/laksh9950/events{/privacy}", "received_events_url": "https://api.github.com/users/laksh9950/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-29T16:40:53
2021-04-30T13:29:18
2021-04-30T13:03:12
CONTRIBUTOR
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2292/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2292/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2292", "html_url": "https://github.com/huggingface/datasets/pull/2292", "diff_url": "https://github.com/huggingface/datasets/pull/2292.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2292.patch", "merged_at": "2021-04-30T13:03:12" }
true
https://api.github.com/repos/huggingface/datasets/issues/2291
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2291/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2291/comments
https://api.github.com/repos/huggingface/datasets/issues/2291/events
https://github.com/huggingface/datasets/pull/2291
871,216,757
MDExOlB1bGxSZXF1ZXN0NjI2MjcyNzE5
2,291
Don't copy recordbatches in memory during a table deepcopy
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-29T16:26:05
2021-04-29T16:34:35
2021-04-29T16:34:34
MEMBER
null
Fix issue #2276 and hopefully #2134 The recordbatches of the `IndexedTableMixin` used to speed up queries to the table were copied in memory during a table deepcopy. This resulted in `concatenate_datasets`, `load_from_disk` and other methods to always bring the data in memory. I fixed the copy similarly to #2287 and updated the test to make sure it doesn't happen again (added a test for deepcopy + make sure that the immutable arrow objects are passed to the copied table without being copied). The issue was not caught by our tests because the total allocated bytes value in PyArrow isn't updated when deepcopying recordbatches: the copy in memory wasn't detected. This behavior looks like a bug in PyArrow, I'll open a ticket on JIRA. Thanks @samsontmr , @TaskManager91 and @mariosasko for the help
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2291/reactions", "total_count": 2, "+1": 1, "-1": 0, "laugh": 0, "hooray": 1, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2291/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2291", "html_url": "https://github.com/huggingface/datasets/pull/2291", "diff_url": "https://github.com/huggingface/datasets/pull/2291.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2291.patch", "merged_at": "2021-04-29T16:34:33" }
true
https://api.github.com/repos/huggingface/datasets/issues/2290
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2290/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2290/comments
https://api.github.com/repos/huggingface/datasets/issues/2290/events
https://github.com/huggingface/datasets/pull/2290
871,145,817
MDExOlB1bGxSZXF1ZXN0NjI2MjEyNTIz
2,290
Bbaw egyptian
{ "login": "phiwi", "id": 54144149, "node_id": "MDQ6VXNlcjU0MTQ0MTQ5", "avatar_url": "https://avatars.githubusercontent.com/u/54144149?v=4", "gravatar_id": "", "url": "https://api.github.com/users/phiwi", "html_url": "https://github.com/phiwi", "followers_url": "https://api.github.com/users/phiwi/followers", "following_url": "https://api.github.com/users/phiwi/following{/other_user}", "gists_url": "https://api.github.com/users/phiwi/gists{/gist_id}", "starred_url": "https://api.github.com/users/phiwi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/phiwi/subscriptions", "organizations_url": "https://api.github.com/users/phiwi/orgs", "repos_url": "https://api.github.com/users/phiwi/repos", "events_url": "https://api.github.com/users/phiwi/events{/privacy}", "received_events_url": "https://api.github.com/users/phiwi/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-29T15:27:58
2021-05-06T17:25:25
2021-05-06T17:25:25
CONTRIBUTOR
null
This is the "hieroglyph corpus" that I could unfortunately not contribute during the marathon. I re-extracted it again now, so that it is in the state as used in my paper (seee documentation). I hope it satiesfies your requirements and wish every scientist out their loads of fun deciphering a 5.000 years old language :-)
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2290/reactions", "total_count": 3, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 3, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2290/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2290", "html_url": "https://github.com/huggingface/datasets/pull/2290", "diff_url": "https://github.com/huggingface/datasets/pull/2290.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2290.patch", "merged_at": "2021-05-06T17:25:25" }
true
https://api.github.com/repos/huggingface/datasets/issues/2289
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2289/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2289/comments
https://api.github.com/repos/huggingface/datasets/issues/2289/events
https://github.com/huggingface/datasets/pull/2289
871,118,573
MDExOlB1bGxSZXF1ZXN0NjI2MTg5MDU3
2,289
Allow collaborators to self-assign issues
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-29T15:07:06
2021-04-30T18:28:16
2021-04-30T18:28:16
MEMBER
null
Allow collaborators (without write access to the repository) to self-assign issues. In order to self-assign an issue, they have to comment it with the word: `#take` or `#self-assign`.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2289/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2289/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2289", "html_url": "https://github.com/huggingface/datasets/pull/2289", "diff_url": "https://github.com/huggingface/datasets/pull/2289.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2289.patch", "merged_at": "2021-04-30T18:28:16" }
true
https://api.github.com/repos/huggingface/datasets/issues/2288
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2288/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2288/comments
https://api.github.com/repos/huggingface/datasets/issues/2288/events
https://github.com/huggingface/datasets/issues/2288
871,111,235
MDU6SXNzdWU4NzExMTEyMzU=
2,288
Load_dataset for local CSV files
{ "login": "sstojanoska", "id": 17052700, "node_id": "MDQ6VXNlcjE3MDUyNzAw", "avatar_url": "https://avatars.githubusercontent.com/u/17052700?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sstojanoska", "html_url": "https://github.com/sstojanoska", "followers_url": "https://api.github.com/users/sstojanoska/followers", "following_url": "https://api.github.com/users/sstojanoska/following{/other_user}", "gists_url": "https://api.github.com/users/sstojanoska/gists{/gist_id}", "starred_url": "https://api.github.com/users/sstojanoska/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sstojanoska/subscriptions", "organizations_url": "https://api.github.com/users/sstojanoska/orgs", "repos_url": "https://api.github.com/users/sstojanoska/repos", "events_url": "https://api.github.com/users/sstojanoska/events{/privacy}", "received_events_url": "https://api.github.com/users/sstojanoska/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi,\r\n\r\nthis is not a standard CSV file (requires additional preprocessing) so I wouldn't label this as s bug. You could parse the examples with the regex module or the string API to extract the data, but the following approach is probably the easiest (once you load the data):\r\n```python\r\nimport ast\r\n# load the dataset and copy the features\r\ndef process(ex):\r\n return {\"tokens\": ast.literal_eval(ex[\"tokens\"]), \"labels\": ast.literal_eval(ex[\"labels\"])}\r\ndataset = dataset.map(process, features=new_features)\r\n```\r\n", "Hi,\r\n\r\nThanks for the reply.\r\nI have already used ```ast.literal_eval``` to evaluate the string into list, but I was getting another error:\r\n```\r\nArrowInvalid: Could not convert X with type str: tried to convert to int\r\n```\r\nWhy this happens ? Should labels be mapped to their ids and use int instead of str ?", "Yes, just map the labels to their ids." ]
2021-04-29T15:01:10
2021-06-15T13:49:26
2021-06-15T13:49:26
NONE
null
The method load_dataset fails to correctly load a dataset from csv. Moreover, I am working on a token-classification task ( POS tagging) , where each row in my CSV contains two columns each of them having a list of strings. row example: ```tokens | labels ['I' , 'am', 'John'] | ['PRON', 'AUX', 'PROPN' ] ``` The method, loads each list as a string: (i.g "['I' , 'am', 'John']"). To solve this issue, I copied the Datasets.Features, created Sequence types ( instead of Value) and tried to cast the features type ``` new_features['tokens'] = Sequence(feature=Value(dtype='string', id=None)) new_features['labels'] = Sequence(feature=ClassLabel(num_classes=len(tag2idx), names=list(unique_tags))) dataset = dataset.cast(new_features) ``` but I got the following error ``` ArrowNotImplementedError: Unsupported cast from string to list using function cast_list ``` Moreover, I tried to set feature parameter in load_dataset method, to my new_features, but this fails as well. How can this be solved ?
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2288/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2288/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2287
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2287/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2287/comments
https://api.github.com/repos/huggingface/datasets/issues/2287/events
https://github.com/huggingface/datasets/pull/2287
871,063,374
MDExOlB1bGxSZXF1ZXN0NjI2MTQ0MTQ3
2,287
Avoid copying table's record batches
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-29T14:15:01
2021-04-29T16:34:23
2021-04-29T16:34:22
CONTRIBUTOR
null
Fixes #2276
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2287/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2287/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2287", "html_url": "https://github.com/huggingface/datasets/pull/2287", "diff_url": "https://github.com/huggingface/datasets/pull/2287.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2287.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/2286
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2286/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2286/comments
https://api.github.com/repos/huggingface/datasets/issues/2286/events
https://github.com/huggingface/datasets/pull/2286
871,032,393
MDExOlB1bGxSZXF1ZXN0NjI2MTE5MTE2
2,286
Fix metadata validation with config names
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-29T13:44:32
2021-04-29T14:07:29
2021-04-29T14:07:28
MEMBER
null
I noticed in https://github.com/huggingface/datasets/pull/2280 that the metadata validator doesn't parse the tags in the readme properly when then contain the tags per config.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2286/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2286/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2286", "html_url": "https://github.com/huggingface/datasets/pull/2286", "diff_url": "https://github.com/huggingface/datasets/pull/2286.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2286.patch", "merged_at": "2021-04-29T14:07:28" }
true
https://api.github.com/repos/huggingface/datasets/issues/2285
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2285/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2285/comments
https://api.github.com/repos/huggingface/datasets/issues/2285/events
https://github.com/huggingface/datasets/issues/2285
871,005,236
MDU6SXNzdWU4NzEwMDUyMzY=
2,285
Help understanding how to build a dataset for language modeling as with the old TextDataset
{ "login": "danieldiezmallo", "id": 46021411, "node_id": "MDQ6VXNlcjQ2MDIxNDEx", "avatar_url": "https://avatars.githubusercontent.com/u/46021411?v=4", "gravatar_id": "", "url": "https://api.github.com/users/danieldiezmallo", "html_url": "https://github.com/danieldiezmallo", "followers_url": "https://api.github.com/users/danieldiezmallo/followers", "following_url": "https://api.github.com/users/danieldiezmallo/following{/other_user}", "gists_url": "https://api.github.com/users/danieldiezmallo/gists{/gist_id}", "starred_url": "https://api.github.com/users/danieldiezmallo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/danieldiezmallo/subscriptions", "organizations_url": "https://api.github.com/users/danieldiezmallo/orgs", "repos_url": "https://api.github.com/users/danieldiezmallo/repos", "events_url": "https://api.github.com/users/danieldiezmallo/events{/privacy}", "received_events_url": "https://api.github.com/users/danieldiezmallo/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "\r\nI received an answer for this question on the HuggingFace Datasets forum by @lhoestq\r\n\r\nHi !\r\n\r\nIf you want to tokenize line by line, you can use this:\r\n\r\n```\r\nmax_seq_length = 512\r\nnum_proc = 4\r\n\r\ndef tokenize_function(examples):\r\n# Remove empty lines\r\nexamples[\"text\"] = [line for line in examples[\"text\"] if len(line) > 0 and not line.isspace()]\r\nreturn tokenizer(\r\n examples[\"text\"],\r\n truncation=True,\r\n max_length=max_seq_length,\r\n)\r\n\r\ntokenized_dataset = dataset.map(\r\ntokenize_function,\r\nbatched=True,\r\nnum_proc=num_proc,\r\nremove_columns=[\"text\"],\r\n)\r\n```\r\n\r\nThough the TextDataset was doing a different processing by concatenating all the texts and building blocks of size 512. If you need this behavior, then you must apply an additional map function after the tokenization:\r\n\r\n```\r\n# Main data processing function that will concatenate all texts from\r\n# our dataset and generate chunks of max_seq_length.\r\ndef group_texts(examples):\r\n# Concatenate all texts.\r\nconcatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}\r\ntotal_length = len(concatenated_examples[list(examples.keys())[0]])\r\n# We drop the small remainder, we could add padding if the model supported it instead of this drop,\r\n# you can customize this part to your needs.\r\ntotal_length = (total_length // max_seq_length) * max_seq_length\r\n# Split by chunks of max_len.\r\nresult = {\r\n k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]\r\n for k, t in concatenated_examples.items()\r\n}\r\nreturn result\r\n\r\n# Note that with `batched=True`, this map processes 1,000 texts together,\r\n# so group_texts throws away a remainder for each of those groups of 1,000 texts.\r\n# You can adjust that batch_size here but a higher value might be slower to preprocess.\r\n\r\ntokenized_dataset = tokenized_dataset.map(\r\ngroup_texts,\r\nbatched=True,\r\nnum_proc=num_proc,\r\n)\r\n```\r\n\r\nThis code comes from the processing of the run_mlm.py example script of transformers\r\n\r\n", "Resolved" ]
2021-04-29T13:16:45
2021-05-19T07:22:45
2021-05-19T07:22:39
NONE
null
Hello, I am trying to load a custom dataset that I will then use for language modeling. The dataset consists of a text file that has a whole document in each line, meaning that each line overpasses the normal 512 tokens limit of most tokenizers. I would like to understand what is the process to build a text dataset that tokenizes each line, having previously split the documents in the dataset into lines of a "tokenizable" size, as the old TextDataset class would do, where you only had to do the following, and a tokenized dataset without text loss would be available to pass to a DataCollator: ``` model_checkpoint = 'distilbert-base-uncased' from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) from transformers import TextDataset dataset = TextDataset( tokenizer=tokenizer, file_path="path/to/text_file.txt", block_size=512, ) ``` For now, what I have is the following, which, of course, throws an error because each line is longer than the maximum block size in the tokenizer: ``` import datasets dataset = datasets.load_dataset('path/to/text_file.txt') model_checkpoint = 'distilbert-base-uncased' tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) def tokenize_function(examples): return tokenizer(examples["text"]) tokenized_datasets = dataset.map(tokenize_function, batched=True, num_proc=4, remove_columns=["text"]) tokenized_datasets ``` So what would be the "standard" way of creating a dataset in the way it was done before? Thank you very much for the help :))
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2285/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2285/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2284
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2284/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2284/comments
https://api.github.com/repos/huggingface/datasets/issues/2284/events
https://github.com/huggingface/datasets/pull/2284
870,932,710
MDExOlB1bGxSZXF1ZXN0NjI2MDM5MDc5
2,284
Initialize Imdb dataset as used in Don't Stop Pretraining Paper
{ "login": "BobbyManion", "id": 52530809, "node_id": "MDQ6VXNlcjUyNTMwODA5", "avatar_url": "https://avatars.githubusercontent.com/u/52530809?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BobbyManion", "html_url": "https://github.com/BobbyManion", "followers_url": "https://api.github.com/users/BobbyManion/followers", "following_url": "https://api.github.com/users/BobbyManion/following{/other_user}", "gists_url": "https://api.github.com/users/BobbyManion/gists{/gist_id}", "starred_url": "https://api.github.com/users/BobbyManion/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BobbyManion/subscriptions", "organizations_url": "https://api.github.com/users/BobbyManion/orgs", "repos_url": "https://api.github.com/users/BobbyManion/repos", "events_url": "https://api.github.com/users/BobbyManion/events{/privacy}", "received_events_url": "https://api.github.com/users/BobbyManion/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-29T11:52:38
2021-04-29T12:54:34
2021-04-29T12:54:34
NONE
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2284/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2284/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2284", "html_url": "https://github.com/huggingface/datasets/pull/2284", "diff_url": "https://github.com/huggingface/datasets/pull/2284.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2284.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/2283
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2283/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2283/comments
https://api.github.com/repos/huggingface/datasets/issues/2283/events
https://github.com/huggingface/datasets/pull/2283
870,926,475
MDExOlB1bGxSZXF1ZXN0NjI2MDM0MDk5
2,283
Initialize imdb dataset from don't stop pretraining paper
{ "login": "BobbyManion", "id": 52530809, "node_id": "MDQ6VXNlcjUyNTMwODA5", "avatar_url": "https://avatars.githubusercontent.com/u/52530809?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BobbyManion", "html_url": "https://github.com/BobbyManion", "followers_url": "https://api.github.com/users/BobbyManion/followers", "following_url": "https://api.github.com/users/BobbyManion/following{/other_user}", "gists_url": "https://api.github.com/users/BobbyManion/gists{/gist_id}", "starred_url": "https://api.github.com/users/BobbyManion/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BobbyManion/subscriptions", "organizations_url": "https://api.github.com/users/BobbyManion/orgs", "repos_url": "https://api.github.com/users/BobbyManion/repos", "events_url": "https://api.github.com/users/BobbyManion/events{/privacy}", "received_events_url": "https://api.github.com/users/BobbyManion/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-29T11:44:54
2021-04-29T11:50:24
2021-04-29T11:50:24
NONE
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2283/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2283/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2283", "html_url": "https://github.com/huggingface/datasets/pull/2283", "diff_url": "https://github.com/huggingface/datasets/pull/2283.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2283.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/2282
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2282/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2282/comments
https://api.github.com/repos/huggingface/datasets/issues/2282/events
https://github.com/huggingface/datasets/pull/2282
870,900,332
MDExOlB1bGxSZXF1ZXN0NjI2MDEyMzM3
2,282
Initialize imdb dataset from don't stop pretraining paper
{ "login": "BobbyManion", "id": 52530809, "node_id": "MDQ6VXNlcjUyNTMwODA5", "avatar_url": "https://avatars.githubusercontent.com/u/52530809?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BobbyManion", "html_url": "https://github.com/BobbyManion", "followers_url": "https://api.github.com/users/BobbyManion/followers", "following_url": "https://api.github.com/users/BobbyManion/following{/other_user}", "gists_url": "https://api.github.com/users/BobbyManion/gists{/gist_id}", "starred_url": "https://api.github.com/users/BobbyManion/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BobbyManion/subscriptions", "organizations_url": "https://api.github.com/users/BobbyManion/orgs", "repos_url": "https://api.github.com/users/BobbyManion/repos", "events_url": "https://api.github.com/users/BobbyManion/events{/privacy}", "received_events_url": "https://api.github.com/users/BobbyManion/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-29T11:17:56
2021-04-29T11:43:51
2021-04-29T11:43:51
NONE
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2282/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2282/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2282", "html_url": "https://github.com/huggingface/datasets/pull/2282", "diff_url": "https://github.com/huggingface/datasets/pull/2282.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2282.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/2281
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2281/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2281/comments
https://api.github.com/repos/huggingface/datasets/issues/2281/events
https://github.com/huggingface/datasets/pull/2281
870,792,784
MDExOlB1bGxSZXF1ZXN0NjI1OTI2MjAw
2,281
Update multi_woz_v22 checksum
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-29T09:09:11
2021-04-29T13:41:35
2021-04-29T13:41:34
MEMBER
null
Fix issue https://github.com/huggingface/datasets/issues/1876 The files were changed in https://github.com/budzianowski/multiwoz/pull/72
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2281/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2281/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2281", "html_url": "https://github.com/huggingface/datasets/pull/2281", "diff_url": "https://github.com/huggingface/datasets/pull/2281.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2281.patch", "merged_at": "2021-04-29T13:41:34" }
true
https://api.github.com/repos/huggingface/datasets/issues/2280
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2280/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2280/comments
https://api.github.com/repos/huggingface/datasets/issues/2280/events
https://github.com/huggingface/datasets/pull/2280
870,780,431
MDExOlB1bGxSZXF1ZXN0NjI1OTE2Mzcy
2,280
Fixed typo seperate->separate
{ "login": "laksh9950", "id": 32505743, "node_id": "MDQ6VXNlcjMyNTA1NzQz", "avatar_url": "https://avatars.githubusercontent.com/u/32505743?v=4", "gravatar_id": "", "url": "https://api.github.com/users/laksh9950", "html_url": "https://github.com/laksh9950", "followers_url": "https://api.github.com/users/laksh9950/followers", "following_url": "https://api.github.com/users/laksh9950/following{/other_user}", "gists_url": "https://api.github.com/users/laksh9950/gists{/gist_id}", "starred_url": "https://api.github.com/users/laksh9950/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/laksh9950/subscriptions", "organizations_url": "https://api.github.com/users/laksh9950/orgs", "repos_url": "https://api.github.com/users/laksh9950/repos", "events_url": "https://api.github.com/users/laksh9950/events{/privacy}", "received_events_url": "https://api.github.com/users/laksh9950/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-29T08:55:46
2021-04-29T16:41:22
2021-04-29T16:41:16
CONTRIBUTOR
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2280/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2280/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2280", "html_url": "https://github.com/huggingface/datasets/pull/2280", "diff_url": "https://github.com/huggingface/datasets/pull/2280.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2280.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/2279
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2279/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2279/comments
https://api.github.com/repos/huggingface/datasets/issues/2279/events
https://github.com/huggingface/datasets/issues/2279
870,431,662
MDU6SXNzdWU4NzA0MzE2NjI=
2,279
Compatibility with Ubuntu 18 and GLIBC 2.27?
{ "login": "tginart", "id": 11379648, "node_id": "MDQ6VXNlcjExMzc5NjQ4", "avatar_url": "https://avatars.githubusercontent.com/u/11379648?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tginart", "html_url": "https://github.com/tginart", "followers_url": "https://api.github.com/users/tginart/followers", "following_url": "https://api.github.com/users/tginart/following{/other_user}", "gists_url": "https://api.github.com/users/tginart/gists{/gist_id}", "starred_url": "https://api.github.com/users/tginart/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tginart/subscriptions", "organizations_url": "https://api.github.com/users/tginart/orgs", "repos_url": "https://api.github.com/users/tginart/repos", "events_url": "https://api.github.com/users/tginart/events{/privacy}", "received_events_url": "https://api.github.com/users/tginart/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "From the trace this seems like an error in the tokenizer library instead.\r\n\r\nDo you mind opening an issue at https://github.com/huggingface/tokenizers instead?", "Hi @tginart, thanks for reporting.\r\n\r\nI think this issue is already open at `tokenizers` library: https://github.com/huggingface/tokenizers/issues/685" ]
2021-04-28T22:08:07
2021-04-29T07:42:42
2021-04-29T07:42:42
NONE
null
## Describe the bug For use on Ubuntu systems, it seems that datasets requires GLIBC 2.29. However, Ubuntu 18 runs with GLIBC 2.27 and it seems [non-trivial to upgrade GLIBC to 2.29 for Ubuntu 18 users](https://www.digitalocean.com/community/questions/how-install-glibc-2-29-or-higher-in-ubuntu-18-04). I'm not sure if there is anything that can be done about this, but I'd like to confirm that using huggingface/datasets requires either an upgrade to Ubuntu 19/20 or a hand-rolled install of a higher version of GLIBC. ## Steps to reproduce the bug 1. clone the transformers repo 2. move to examples/pytorch/language-modeling 3. run example command: ```python run_clm.py --model_name_or_path gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train --do_eval --output_dir /tmp/test-clm``` ## Expected results As described in the transformers repo. ## Actual results ```Traceback (most recent call last): File "run_clm.py", line 34, in <module> from transformers import ( File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/transformers/__init__.py", line 2487, in __getattr__ return super().__getattr__(name) File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/transformers/file_utils.py", line 1699, in __getattr__ module = self._get_module(self._class_to_module[name]) File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/transformers/__init__.py", line 2481, in _get_module return importlib.import_module("." + module_name, self.__name__) File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/importlib/__init__.py", line 127, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/transformers/models/__init__.py", line 19, in <module> from . import ( File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/transformers/models/layoutlm/__init__.py", line 23, in <module> from .tokenization_layoutlm import LayoutLMTokenizer File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/transformers/models/layoutlm/tokenization_layoutlm.py", line 19, in <module> from ..bert.tokenization_bert import BertTokenizer File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/transformers/models/bert/tokenization_bert.py", line 23, in <module> from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/transformers/tokenization_utils.py", line 26, in <module> from .tokenization_utils_base import ( File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/transformers/tokenization_utils_base.py", line 68, in <module> from tokenizers import AddedToken File "/home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/tokenizers/__init__.py", line 79, in <module> from .tokenizers import ( ImportError: /lib/x86_64-linux-gnu/libm.so.6: version `GLIBC_2.29' not found (required by /home/tginart/anaconda3/envs/huggingface/lib/python3.7/site-packages/tokenizers/tokenizers.cpython-37m-x86_64-linux-gnu.so) ``` ## Versions Paste the output of the following code: ``` - Datasets: 1.6.1 - Python: 3.7.10 (default, Feb 26 2021, 18:47:35) [GCC 7.3.0] - Platform: Linux-4.15.0-128-generic-x86_64-with-debian-buster-sid ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2279/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2279/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2278
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2278/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2278/comments
https://api.github.com/repos/huggingface/datasets/issues/2278/events
https://github.com/huggingface/datasets/issues/2278
870,088,059
MDU6SXNzdWU4NzAwODgwNTk=
2,278
Loss result inGptNeoForCasual
{ "login": "Yossillamm", "id": 51174606, "node_id": "MDQ6VXNlcjUxMTc0NjA2", "avatar_url": "https://avatars.githubusercontent.com/u/51174606?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Yossillamm", "html_url": "https://github.com/Yossillamm", "followers_url": "https://api.github.com/users/Yossillamm/followers", "following_url": "https://api.github.com/users/Yossillamm/following{/other_user}", "gists_url": "https://api.github.com/users/Yossillamm/gists{/gist_id}", "starred_url": "https://api.github.com/users/Yossillamm/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Yossillamm/subscriptions", "organizations_url": "https://api.github.com/users/Yossillamm/orgs", "repos_url": "https://api.github.com/users/Yossillamm/repos", "events_url": "https://api.github.com/users/Yossillamm/events{/privacy}", "received_events_url": "https://api.github.com/users/Yossillamm/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
null
[]
null
[ "Hi ! I think you might have to ask on the `transformers` repo on or the forum at https://discuss.huggingface.co/\r\n\r\nClosing since it's not related to this library" ]
2021-04-28T15:39:52
2021-05-06T16:14:23
2021-05-06T16:14:23
NONE
null
Is there any way you give the " loss" and "logits" results in the gpt neo api?
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2278/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2278/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2276
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2276/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2276/comments
https://api.github.com/repos/huggingface/datasets/issues/2276/events
https://github.com/huggingface/datasets/issues/2276
870,010,511
MDU6SXNzdWU4NzAwMTA1MTE=
2,276
concatenate_datasets loads all the data into memory
{ "login": "TaskManager91", "id": 7063207, "node_id": "MDQ6VXNlcjcwNjMyMDc=", "avatar_url": "https://avatars.githubusercontent.com/u/7063207?v=4", "gravatar_id": "", "url": "https://api.github.com/users/TaskManager91", "html_url": "https://github.com/TaskManager91", "followers_url": "https://api.github.com/users/TaskManager91/followers", "following_url": "https://api.github.com/users/TaskManager91/following{/other_user}", "gists_url": "https://api.github.com/users/TaskManager91/gists{/gist_id}", "starred_url": "https://api.github.com/users/TaskManager91/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/TaskManager91/subscriptions", "organizations_url": "https://api.github.com/users/TaskManager91/orgs", "repos_url": "https://api.github.com/users/TaskManager91/repos", "events_url": "https://api.github.com/users/TaskManager91/events{/privacy}", "received_events_url": "https://api.github.com/users/TaskManager91/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
[ "Therefore, when I try to concatenate larger datasets (5x 35GB data sets) I also get an out of memory error, since over 90GB of swap space was used at the time of the crash:\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nMemoryError Traceback (most recent call last)\r\n<ipython-input-6-9766d77530b9> in <module>\r\n 20 print(file_name)\r\n 21 cv_batch = load_from_disk(file_name)\r\n---> 22 cv_sampled_train = concatenate_datasets([cv_sampled_train, cv_batch])\r\n 23 \r\n 24 print(\"Saving to disk!\")\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\datasets\\arrow_dataset.py in concatenate_datasets(dsets, info, split, axis)\r\n 2891 \r\n 2892 # Concatenate tables\r\n-> 2893 table = concat_tables([dset._data for dset in dsets if len(dset._data) > 0], axis=axis)\r\n 2894 table = update_metadata_with_features(table, None)\r\n 2895 \r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\datasets\\table.py in concat_tables(tables, axis)\r\n 837 if len(tables) == 1:\r\n 838 return tables[0]\r\n--> 839 return ConcatenationTable.from_tables(tables, axis=axis)\r\n 840 \r\n 841 \r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\datasets\\table.py in from_tables(cls, tables, axis)\r\n 697 return result\r\n 698 \r\n--> 699 blocks = to_blocks(tables[0])\r\n 700 for table in tables[1:]:\r\n 701 table_blocks = to_blocks(table)\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\datasets\\table.py in to_blocks(table)\r\n 669 return [[InMemoryTable(table)]]\r\n 670 elif isinstance(table, ConcatenationTable):\r\n--> 671 return copy.deepcopy(table.blocks)\r\n 672 else:\r\n 673 return [[table]]\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 144 copier = _deepcopy_dispatch.get(cls)\r\n 145 if copier is not None:\r\n--> 146 y = copier(x, memo)\r\n 147 else:\r\n 148 if issubclass(cls, type):\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _deepcopy_list(x, memo, deepcopy)\r\n 203 append = y.append\r\n 204 for a in x:\r\n--> 205 append(deepcopy(a, memo))\r\n 206 return y\r\n 207 d[list] = _deepcopy_list\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 144 copier = _deepcopy_dispatch.get(cls)\r\n 145 if copier is not None:\r\n--> 146 y = copier(x, memo)\r\n 147 else:\r\n 148 if issubclass(cls, type):\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _deepcopy_list(x, memo, deepcopy)\r\n 203 append = y.append\r\n 204 for a in x:\r\n--> 205 append(deepcopy(a, memo))\r\n 206 return y\r\n 207 d[list] = _deepcopy_list\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 151 copier = getattr(x, \"__deepcopy__\", None)\r\n 152 if copier is not None:\r\n--> 153 y = copier(memo)\r\n 154 else:\r\n 155 reductor = dispatch_table.get(cls)\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\datasets\\table.py in __deepcopy__(self, memo)\r\n 143 # by adding it to the memo, self.table won't be copied\r\n 144 memo[id(self.table)] = self.table\r\n--> 145 return _deepcopy(self, memo)\r\n 146 \r\n 147 def __getstate__(self):\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\datasets\\table.py in _deepcopy(x, memo)\r\n 62 memo[id(x)] = result\r\n 63 for k, v in x.__dict__.items():\r\n---> 64 setattr(result, k, copy.deepcopy(v, memo))\r\n 65 return result\r\n 66 \r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 144 copier = _deepcopy_dispatch.get(cls)\r\n 145 if copier is not None:\r\n--> 146 y = copier(x, memo)\r\n 147 else:\r\n 148 if issubclass(cls, type):\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _deepcopy_list(x, memo, deepcopy)\r\n 203 append = y.append\r\n 204 for a in x:\r\n--> 205 append(deepcopy(a, memo))\r\n 206 return y\r\n 207 d[list] = _deepcopy_list\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 170 y = x\r\n 171 else:\r\n--> 172 y = _reconstruct(x, memo, *rv)\r\n 173 \r\n 174 # If is its own copy, don't memoize.\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _reconstruct(x, memo, func, args, state, listiter, dictiter, deepcopy)\r\n 262 if deep and args:\r\n 263 args = (deepcopy(arg, memo) for arg in args)\r\n--> 264 y = func(*args)\r\n 265 if deep:\r\n 266 memo[id(x)] = y\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in <genexpr>(.0)\r\n 261 deep = memo is not None\r\n 262 if deep and args:\r\n--> 263 args = (deepcopy(arg, memo) for arg in args)\r\n 264 y = func(*args)\r\n 265 if deep:\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 144 copier = _deepcopy_dispatch.get(cls)\r\n 145 if copier is not None:\r\n--> 146 y = copier(x, memo)\r\n 147 else:\r\n 148 if issubclass(cls, type):\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _deepcopy_list(x, memo, deepcopy)\r\n 203 append = y.append\r\n 204 for a in x:\r\n--> 205 append(deepcopy(a, memo))\r\n 206 return y\r\n 207 d[list] = _deepcopy_list\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 170 y = x\r\n 171 else:\r\n--> 172 y = _reconstruct(x, memo, *rv)\r\n 173 \r\n 174 # If is its own copy, don't memoize.\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _reconstruct(x, memo, func, args, state, listiter, dictiter, deepcopy)\r\n 262 if deep and args:\r\n 263 args = (deepcopy(arg, memo) for arg in args)\r\n--> 264 y = func(*args)\r\n 265 if deep:\r\n 266 memo[id(x)] = y\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in <genexpr>(.0)\r\n 261 deep = memo is not None\r\n 262 if deep and args:\r\n--> 263 args = (deepcopy(arg, memo) for arg in args)\r\n 264 y = func(*args)\r\n 265 if deep:\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 144 copier = _deepcopy_dispatch.get(cls)\r\n 145 if copier is not None:\r\n--> 146 y = copier(x, memo)\r\n 147 else:\r\n 148 if issubclass(cls, type):\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _deepcopy_tuple(x, memo, deepcopy)\r\n 208 \r\n 209 def _deepcopy_tuple(x, memo, deepcopy=deepcopy):\r\n--> 210 y = [deepcopy(a, memo) for a in x]\r\n 211 # We're not going to put the tuple in the memo, but it's still important we\r\n 212 # check for it, in case the tuple contains recursive mutable structures.\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in <listcomp>(.0)\r\n 208 \r\n 209 def _deepcopy_tuple(x, memo, deepcopy=deepcopy):\r\n--> 210 y = [deepcopy(a, memo) for a in x]\r\n 211 # We're not going to put the tuple in the memo, but it's still important we\r\n 212 # check for it, in case the tuple contains recursive mutable structures.\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 144 copier = _deepcopy_dispatch.get(cls)\r\n 145 if copier is not None:\r\n--> 146 y = copier(x, memo)\r\n 147 else:\r\n 148 if issubclass(cls, type):\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _deepcopy_list(x, memo, deepcopy)\r\n 203 append = y.append\r\n 204 for a in x:\r\n--> 205 append(deepcopy(a, memo))\r\n 206 return y\r\n 207 d[list] = _deepcopy_list\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 144 copier = _deepcopy_dispatch.get(cls)\r\n 145 if copier is not None:\r\n--> 146 y = copier(x, memo)\r\n 147 else:\r\n 148 if issubclass(cls, type):\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _deepcopy_tuple(x, memo, deepcopy)\r\n 208 \r\n 209 def _deepcopy_tuple(x, memo, deepcopy=deepcopy):\r\n--> 210 y = [deepcopy(a, memo) for a in x]\r\n 211 # We're not going to put the tuple in the memo, but it's still important we\r\n 212 # check for it, in case the tuple contains recursive mutable structures.\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in <listcomp>(.0)\r\n 208 \r\n 209 def _deepcopy_tuple(x, memo, deepcopy=deepcopy):\r\n--> 210 y = [deepcopy(a, memo) for a in x]\r\n 211 # We're not going to put the tuple in the memo, but it's still important we\r\n 212 # check for it, in case the tuple contains recursive mutable structures.\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 144 copier = _deepcopy_dispatch.get(cls)\r\n 145 if copier is not None:\r\n--> 146 y = copier(x, memo)\r\n 147 else:\r\n 148 if issubclass(cls, type):\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in _deepcopy_list(x, memo, deepcopy)\r\n 203 append = y.append\r\n 204 for a in x:\r\n--> 205 append(deepcopy(a, memo))\r\n 206 return y\r\n 207 d[list] = _deepcopy_list\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\copy.py in deepcopy(x, memo, _nil)\r\n 159 reductor = getattr(x, \"__reduce_ex__\", None)\r\n 160 if reductor is not None:\r\n--> 161 rv = reductor(4)\r\n 162 else:\r\n 163 reductor = getattr(x, \"__reduce__\", None)\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\pyarrow\\io.pxi in pyarrow.lib.Buffer.__reduce_ex__()\r\n\r\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\pyarrow\\io.pxi in pyarrow.lib.Buffer.to_pybytes()\r\n\r\nMemoryError: \r\n\r\n```", "Hi ! this looks like an important issue. Let me try to reproduce this.\r\nCc @samsontmr this might be related to the memory issue you have in #2134 ", "@lhoestq Just went to open a similar issue.\r\n\r\nIt seems like deep copying (tested on master) the dataset object writes the table's record batches (`dset._data._batches`) into RAM.\r\n\r\nTo find the bug, I modified the `_deepcopy` function in `table.py` as follows:\r\n```python\r\ndef _deepcopy(x, memo: dict):\r\n \"\"\"deepcopy a regular class instance\"\"\"\r\n import psutil # pip install this package\r\n import time\r\n cls = x.__class__\r\n result = cls.__new__(cls)\r\n memo[id(x)] = result\r\n for k, v in x.__dict__.items():\r\n print(\"=\"* 50)\r\n print(\"Current memory:\", psutil.virtual_memory().percent)\r\n print(f\"Saving object {k} with value {v}\")\r\n setattr(result, k, copy.deepcopy(v, memo))\r\n time.sleep(5)\r\n print(\"Memory after copy:\", psutil.virtual_memory().percent)\r\n return result\r\n```\r\nTest script:\r\n```python\r\nimport copy\r\nfrom datasets import load_dataset\r\nbk = load_dataset(\"bookcorpus\", split=\"train\")\r\nbk_copy = copy.deepcopy(bk)\r\n```", "Thanks for the insights @mariosasko ! I'm working on a fix.\r\nSince this is a big issue I'll make a patch release as soon as this is fixed", "Hi @samsontmr @TaskManager91 the fix is on the master branch, feel free to install `datasets` from source and let us know if you still have issues", "We just released `datasets` 1.6.2 that includes the fix :)", "thanks it works like a charm! :)" ]
2021-04-28T14:27:21
2021-05-03T08:41:55
2021-05-03T08:41:55
NONE
null
## Describe the bug When I try to concatenate 2 datasets (10GB each) , the entire data is loaded into memory instead of being written directly to disk. Interestingly, this happens when trying to save the new dataset to disk or concatenating it again. ![image](https://user-images.githubusercontent.com/7063207/116420321-2b21b480-a83e-11eb-9006-8f6ca729fb6f.png) ## Steps to reproduce the bug ```python from datasets import concatenate_datasets, load_from_disk test_sampled_pro = load_from_disk("test_sampled_pro") val_sampled_pro = load_from_disk("val_sampled_pro") big_set = concatenate_datasets([test_sampled_pro, val_sampled_pro]) # Loaded to memory big_set.save_to_disk("big_set") # Loaded to memory big_set = concatenate_datasets([big_set, val_sampled_pro]) ``` ## Expected results The data should be loaded into memory in batches and then saved directly to disk. ## Actual results The entire data set is loaded into the memory and then saved to the hard disk. ## Versions Paste the output of the following code: ```python - Datasets: 1.6.1 - Python: 3.8.8 (default, Apr 13 2021, 19:58:26) [GCC 7.3.0] - Platform: Linux-5.4.72-microsoft-standard-WSL2-x86_64-with-glibc2.10 ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2276/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2276/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2275
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2275/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2275/comments
https://api.github.com/repos/huggingface/datasets/issues/2275/events
https://github.com/huggingface/datasets/issues/2275
869,378,311
MDU6SXNzdWU4NjkzNzgzMTE=
2,275
SNLI dataset has labels of -1
{ "login": "puzzler10", "id": 17426779, "node_id": "MDQ6VXNlcjE3NDI2Nzc5", "avatar_url": "https://avatars.githubusercontent.com/u/17426779?v=4", "gravatar_id": "", "url": "https://api.github.com/users/puzzler10", "html_url": "https://github.com/puzzler10", "followers_url": "https://api.github.com/users/puzzler10/followers", "following_url": "https://api.github.com/users/puzzler10/following{/other_user}", "gists_url": "https://api.github.com/users/puzzler10/gists{/gist_id}", "starred_url": "https://api.github.com/users/puzzler10/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/puzzler10/subscriptions", "organizations_url": "https://api.github.com/users/puzzler10/orgs", "repos_url": "https://api.github.com/users/puzzler10/repos", "events_url": "https://api.github.com/users/puzzler10/events{/privacy}", "received_events_url": "https://api.github.com/users/puzzler10/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi @puzzler10, \r\nThose examples where `gold_label` field was empty, -1 label was alloted to it. In order to remove it you can filter the samples from train/val/test splits. Here's how you can drop those rows from the dataset:\r\n`dataset = load_dataset(\"snli\")`\r\n`dataset_test_filter = dataset['test'].filter(lambda example: example['label'] != -1)`\r\n\r\nI agree it should have been mentioned in the documentation. I'll raise a PR regarding the same. Thanks for pointing out!" ]
2021-04-28T00:32:25
2021-05-17T13:34:18
2021-05-17T13:34:18
NONE
null
There are a number of rows with a label of -1 in the SNLI dataset. The dataset descriptions [here](https://nlp.stanford.edu/projects/snli/) and [here](https://github.com/huggingface/datasets/tree/master/datasets/snli) don't list -1 as a label possibility, and neither does the dataset viewer. As examples, see index 107 or 124 of the test set. It isn't clear what these labels mean. I found a [line of code](https://github.com/huggingface/datasets/blob/80e59ef178d3bb2090d091bc32315c655eb0633d/datasets/snli/snli.py#L94) that seems to put them in but it seems still unclear why they are there. The current workaround is to just drop the rows from any model being trained. Perhaps the documentation should be updated.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2275/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2275/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2274
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2274/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2274/comments
https://api.github.com/repos/huggingface/datasets/issues/2274/events
https://github.com/huggingface/datasets/pull/2274
869,186,276
MDExOlB1bGxSZXF1ZXN0NjI0NTkyMjQx
2,274
Always update metadata in arrow schema
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-27T19:21:57
2022-06-03T08:31:19
2021-04-29T09:57:50
MEMBER
null
We store a redundant copy of the features in the metadata of the schema of the arrow table. This is used to recover the features when doing `Dataset.from_file`. These metadata are updated after each transfor, that changes the feature types. For each function that transforms the feature types of the dataset, I added a step in the tests to make sure the metadata in the arrow schema are up to date. I also added a line to update the metadata directly in the Dataset.__init__ method. This way even a dataset instantiated with __init__ will have a table with the right metadata. Fix #2271. cc @mariosasko
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2274/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2274/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2274", "html_url": "https://github.com/huggingface/datasets/pull/2274", "diff_url": "https://github.com/huggingface/datasets/pull/2274.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2274.patch", "merged_at": "2021-04-29T09:57:50" }
true
https://api.github.com/repos/huggingface/datasets/issues/2273
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2273/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2273/comments
https://api.github.com/repos/huggingface/datasets/issues/2273/events
https://github.com/huggingface/datasets/pull/2273
869,046,290
MDExOlB1bGxSZXF1ZXN0NjI0NDcxODc1
2,273
Added CUAD metrics
{ "login": "bhavitvyamalik", "id": 19718818, "node_id": "MDQ6VXNlcjE5NzE4ODE4", "avatar_url": "https://avatars.githubusercontent.com/u/19718818?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bhavitvyamalik", "html_url": "https://github.com/bhavitvyamalik", "followers_url": "https://api.github.com/users/bhavitvyamalik/followers", "following_url": "https://api.github.com/users/bhavitvyamalik/following{/other_user}", "gists_url": "https://api.github.com/users/bhavitvyamalik/gists{/gist_id}", "starred_url": "https://api.github.com/users/bhavitvyamalik/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhavitvyamalik/subscriptions", "organizations_url": "https://api.github.com/users/bhavitvyamalik/orgs", "repos_url": "https://api.github.com/users/bhavitvyamalik/repos", "events_url": "https://api.github.com/users/bhavitvyamalik/events{/privacy}", "received_events_url": "https://api.github.com/users/bhavitvyamalik/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-27T16:49:12
2021-04-29T13:59:47
2021-04-29T13:59:47
CONTRIBUTOR
null
`EM`, `F1`, `AUPR`, `Precision@80%Recall`, and `Precision@90%Recall` metrics supported for CUAD
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2273/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2273/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2273", "html_url": "https://github.com/huggingface/datasets/pull/2273", "diff_url": "https://github.com/huggingface/datasets/pull/2273.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2273.patch", "merged_at": "2021-04-29T13:59:47" }
true
https://api.github.com/repos/huggingface/datasets/issues/2272
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2272/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2272/comments
https://api.github.com/repos/huggingface/datasets/issues/2272/events
https://github.com/huggingface/datasets/issues/2272
869,017,977
MDU6SXNzdWU4NjkwMTc5Nzc=
2,272
Bug in Dataset.class_encode_column
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "This has been fixed in this commit: https://github.com/huggingface/datasets/pull/2254/commits/88676c930216cd4cc31741b99827b477d2b46cb6\r\n\r\nIt was introduced in #2246 : using map with `input_columns` doesn't return the other columns anymore" ]
2021-04-27T16:13:18
2021-04-30T12:54:27
2021-04-30T12:54:27
MEMBER
null
## Describe the bug All the rest of the columns except the one passed to `Dataset.class_encode_column` are discarded. ## Expected results All the original columns should be kept. This needs regression tests.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2272/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2272/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2271
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2271/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2271/comments
https://api.github.com/repos/huggingface/datasets/issues/2271/events
https://github.com/huggingface/datasets/issues/2271
869,002,141
MDU6SXNzdWU4NjkwMDIxNDE=
2,271
Synchronize table metadata with features
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
null
[]
null
[ "See PR #2274 " ]
2021-04-27T15:55:13
2022-06-01T17:13:21
2022-06-01T17:13:21
MEMBER
null
**Is your feature request related to a problem? Please describe.** As pointed out in this [comment](https://github.com/huggingface/datasets/pull/2145#discussion_r621326767): > Metadata stored in the schema is just a redundant information regarding the feature types. It is used when calling Dataset.from_file to know which feature types to use. These metadata are stored in the schema of the pyarrow table by using `update_metadata_with_features`. However this something that's almost never tested properly. **Describe the solution you'd like** We should find a way to always make sure that the metadata (in `self.data.schema.metadata`) are synced with the actual feature types (in `self.info.features`).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2271/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2271/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2270
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2270/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2270/comments
https://api.github.com/repos/huggingface/datasets/issues/2270/events
https://github.com/huggingface/datasets/pull/2270
868,913,660
MDExOlB1bGxSZXF1ZXN0NjI0MzU5Njky
2,270
Fix iterable interface expected by numpy
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-27T14:35:56
2021-04-28T17:39:27
2021-04-28T17:39:27
MEMBER
null
Numpy expects the old iterable interface with `__getitem__` instead of `__iter__`.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2270/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2270/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2270", "html_url": "https://github.com/huggingface/datasets/pull/2270", "diff_url": "https://github.com/huggingface/datasets/pull/2270.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2270.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/2269
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2269/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2269/comments
https://api.github.com/repos/huggingface/datasets/issues/2269/events
https://github.com/huggingface/datasets/pull/2269
868,878,468
MDExOlB1bGxSZXF1ZXN0NjI0MzMwNDA3
2,269
Fix query table with iterable
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-27T13:59:38
2021-04-27T14:21:57
2021-04-27T14:21:56
MEMBER
null
The benchmark runs are failing on master because it tries to use an iterable to query the dataset. However there's currently an issue caused by the use of `np.array` instead of `np.fromiter` on the iterable. This PR fixes it
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2269/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2269/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2269", "html_url": "https://github.com/huggingface/datasets/pull/2269", "diff_url": "https://github.com/huggingface/datasets/pull/2269.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2269.patch", "merged_at": "2021-04-27T14:21:56" }
true
https://api.github.com/repos/huggingface/datasets/issues/2268
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2268/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2268/comments
https://api.github.com/repos/huggingface/datasets/issues/2268/events
https://github.com/huggingface/datasets/pull/2268
868,773,380
MDExOlB1bGxSZXF1ZXN0NjI0MjQyODg1
2,268
Don't use pyarrow 4.0.0 since it segfaults when casting a sliced ListArray of integers
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-27T11:58:28
2021-06-12T12:44:49
2021-04-27T13:43:20
MEMBER
null
This test `tests/test_table.py::test_concatenation_table_cast` segfaults with the latest update of pyarrow 4.0.0. Setting `pyarrow<4.0.0` for now. I'll open an issue on JIRA once I know more about the origin of the issue
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2268/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2268/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2268", "html_url": "https://github.com/huggingface/datasets/pull/2268", "diff_url": "https://github.com/huggingface/datasets/pull/2268.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2268.patch", "merged_at": "2021-04-27T13:43:20" }
true
https://api.github.com/repos/huggingface/datasets/issues/2266
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2266/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2266/comments
https://api.github.com/repos/huggingface/datasets/issues/2266/events
https://github.com/huggingface/datasets/pull/2266
867,864,353
MDExOlB1bGxSZXF1ZXN0NjIzNDY1OTI5
2,266
Make tests run faster
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-26T15:55:40
2021-04-29T10:00:13
2021-04-29T10:00:04
MEMBER
null
From 7min to 2min to run pytest. Ideally we should keep the whole CI run time below 10min. In this PR I removed the remote tests that were never used. I also replaced nested parametrized tests with unit tests. This makes me think that we could still add more high level tests to check for a few combinations of parameters (but not all of them since there are too many of them). Let me know what you think Finally in another PR we can also separate in two circleci jobs: - the tests of the code code of the lib - the tests of the all the dataset/metric scripts.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2266/reactions", "total_count": 2, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 2, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2266/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2266", "html_url": "https://github.com/huggingface/datasets/pull/2266", "diff_url": "https://github.com/huggingface/datasets/pull/2266.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2266.patch", "merged_at": "2021-04-29T10:00:04" }
true
https://api.github.com/repos/huggingface/datasets/issues/2265
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2265/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2265/comments
https://api.github.com/repos/huggingface/datasets/issues/2265/events
https://github.com/huggingface/datasets/pull/2265
867,490,646
MDExOlB1bGxSZXF1ZXN0NjIzMTUyOTg5
2,265
Update black
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-26T09:35:09
2021-04-26T09:47:48
2021-04-26T09:47:47
MEMBER
null
Latest black version 21.4b0 requires to reformat most dataset scripts and also the core code of the lib. This makes the CI currently fail on master
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2265/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2265/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2265", "html_url": "https://github.com/huggingface/datasets/pull/2265", "diff_url": "https://github.com/huggingface/datasets/pull/2265.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2265.patch", "merged_at": "2021-04-26T09:47:47" }
true
https://api.github.com/repos/huggingface/datasets/issues/2264
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2264/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2264/comments
https://api.github.com/repos/huggingface/datasets/issues/2264/events
https://github.com/huggingface/datasets/pull/2264
867,476,228
MDExOlB1bGxSZXF1ZXN0NjIzMTQwODA1
2,264
Fix memory issue in multiprocessing: Don't pickle table index
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-26T09:21:35
2021-04-26T10:30:28
2021-04-26T10:08:14
MEMBER
null
The table index is currently being pickled when doing multiprocessing, which brings all the record batches of the dataset in memory. I fixed that by not pickling the index attributes. Therefore each process has to rebuild the index when unpickling the table. Fix issue #2256 We'll do a patch release asap !
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2264/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2264/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2264", "html_url": "https://github.com/huggingface/datasets/pull/2264", "diff_url": "https://github.com/huggingface/datasets/pull/2264.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2264.patch", "merged_at": "2021-04-26T10:08:14" }
true
https://api.github.com/repos/huggingface/datasets/issues/2263
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2263/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2263/comments
https://api.github.com/repos/huggingface/datasets/issues/2263/events
https://github.com/huggingface/datasets/pull/2263
867,420,912
MDExOlB1bGxSZXF1ZXN0NjIzMDk0NTcy
2,263
test data added, dataset_infos updated
{ "login": "bhavitvyamalik", "id": 19718818, "node_id": "MDQ6VXNlcjE5NzE4ODE4", "avatar_url": "https://avatars.githubusercontent.com/u/19718818?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bhavitvyamalik", "html_url": "https://github.com/bhavitvyamalik", "followers_url": "https://api.github.com/users/bhavitvyamalik/followers", "following_url": "https://api.github.com/users/bhavitvyamalik/following{/other_user}", "gists_url": "https://api.github.com/users/bhavitvyamalik/gists{/gist_id}", "starred_url": "https://api.github.com/users/bhavitvyamalik/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhavitvyamalik/subscriptions", "organizations_url": "https://api.github.com/users/bhavitvyamalik/orgs", "repos_url": "https://api.github.com/users/bhavitvyamalik/repos", "events_url": "https://api.github.com/users/bhavitvyamalik/events{/privacy}", "received_events_url": "https://api.github.com/users/bhavitvyamalik/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-26T08:27:18
2021-04-29T09:30:21
2021-04-29T09:30:20
CONTRIBUTOR
null
Fixes #2262. Thanks for pointing out issue with dataset @jinmang2!
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2263/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2263/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2263", "html_url": "https://github.com/huggingface/datasets/pull/2263", "diff_url": "https://github.com/huggingface/datasets/pull/2263.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2263.patch", "merged_at": "2021-04-29T09:30:20" }
true
https://api.github.com/repos/huggingface/datasets/issues/2262
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2262/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2262/comments
https://api.github.com/repos/huggingface/datasets/issues/2262/events
https://github.com/huggingface/datasets/issues/2262
867,325,351
MDU6SXNzdWU4NjczMjUzNTE=
2,262
NewsPH NLI dataset script fails to access test data.
{ "login": "jinmang2", "id": 37775784, "node_id": "MDQ6VXNlcjM3Nzc1Nzg0", "avatar_url": "https://avatars.githubusercontent.com/u/37775784?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jinmang2", "html_url": "https://github.com/jinmang2", "followers_url": "https://api.github.com/users/jinmang2/followers", "following_url": "https://api.github.com/users/jinmang2/following{/other_user}", "gists_url": "https://api.github.com/users/jinmang2/gists{/gist_id}", "starred_url": "https://api.github.com/users/jinmang2/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jinmang2/subscriptions", "organizations_url": "https://api.github.com/users/jinmang2/orgs", "repos_url": "https://api.github.com/users/jinmang2/repos", "events_url": "https://api.github.com/users/jinmang2/events{/privacy}", "received_events_url": "https://api.github.com/users/jinmang2/received_events", "type": "User", "site_admin": false }
[ { "id": 2067388877, "node_id": "MDU6TGFiZWwyMDY3Mzg4ODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20bug", "name": "dataset bug", "color": "2edb81", "default": false, "description": "A bug in a dataset script provided in the library" } ]
closed
false
null
[]
null
[ "Thanks @bhavitvyamalik for the fix !\r\nThe fix will be available in the next release.\r\nIt's already available on the `master` branch. For now you can either install `datasets` from source or use `script_version=\"master\"` in `load_dataset` to use the fixed version of this dataset." ]
2021-04-26T06:44:41
2021-04-29T09:32:03
2021-04-29T09:30:20
NONE
null
In Newsph-NLI Dataset (#1192), it fails to access test data. According to the script below, the download manager will download the train data when trying to download the test data. https://github.com/huggingface/datasets/blob/2a2dd6316af2cc7fdf24e4779312e8ee0c7ed98b/datasets/newsph_nli/newsph_nli.py#L71 If you download it according to the script above, you can see that train and test receive the same data as shown below. ```python >>> from datasets import load_dataset >>> newsph_nli = load_dataset(path="./datasets/newsph_nli.py") >>> newsph_nli DatasetDict({ train: Dataset({ features: ['premise', 'hypothesis', 'label'], num_rows: 420000 }) test: Dataset({ features: ['premise', 'hypothesis', 'label'], num_rows: 420000 }) validation: Dataset({ features: ['premise', 'hypothesis', 'label'], num_rows: 90000 }) }) >>> newsph_nli["train"][0] {'hypothesis': 'Ito ang dineklara ni Atty. Romulo Macalintal, abogado ni Robredo, kaugnay ng pagsisimula ng preliminary conference ngayong hapon sa Presidential Electoral Tribunal (PET).', 'label': 1, 'premise': '"Hindi ko ugali ang mamulitika; mas gusto kong tahimik na magtrabaho. Pero sasabihin ko ito ngayon: ang tapang, lakas, at diskarte, hindi nadadaan sa mapanirang salita. Ang kailangan ng taumbayan ay tapang sa gawa," ayon kay Robredo sa inilabas nitong statement.'} >>> newsph_nli["test"][0] {'hypothesis': 'Ito ang dineklara ni Atty. Romulo Macalintal, abogado ni Robredo, kaugnay ng pagsisimula ng preliminary conference ngayong hapon sa Presidential Electoral Tribunal (PET).', 'label': 1, 'premise': '"Hindi ko ugali ang mamulitika; mas gusto kong tahimik na magtrabaho. Pero sasabihin ko ito ngayon: ang tapang, lakas, at diskarte, hindi nadadaan sa mapanirang salita. Ang kailangan ng taumbayan ay tapang sa gawa," ayon kay Robredo sa inilabas nitong statement.'} ``` In local, I modified the code of the source as below and got the correct result. ```python 71 test_path = os.path.join(download_path, "test.csv") ``` ```python >>> from datasets import load_dataset >>> newsph_nli = load_dataset(path="./datasets/newsph_nli.py") >>> newsph_nli DatasetDict({ train: Dataset({ features: ['premise', 'hypothesis', 'label'], num_rows: 420000 }) test: Dataset({ features: ['premise', 'hypothesis', 'label'], num_rows: 9000 }) validation: Dataset({ features: ['premise', 'hypothesis', 'label'], num_rows: 90000 }) }) >>> newsph_nli["train"][0] {'hypothesis': 'Ito ang dineklara ni Atty. Romulo Macalintal, abogado ni Robredo, kaugnay ng pagsisimula ng preliminary conference ngayong hapon sa Presidential Electoral Tribunal (PET).', 'label': 1, 'premise': '"Hindi ko ugali ang mamulitika; mas gusto kong tahimik na magtrabaho. Pero sasabihin ko ito ngayon: ang tapang, lakas, at diskarte, hindi nadadaan sa mapanirang salita. Ang kailangan ng taumbayan ay tapang sa gawa," ayon kay Robredo sa inilabas nitong statement.'} >>> newsph_nli["test"][0] {'hypothesis': '-- JAI (@JaiPaller) September 13, 2019', 'label': 1, 'premise': 'Pinag-iingat ng Konsulado ng Pilipinas sa Dubai ang publiko, partikular ang mga donor, laban sa mga scam na gumagamit ng mga charitable organization.'} ``` I don't have experience with open source pull requests, so I suggest that you reflect them in the source. Thank you for reading :)
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2262/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2262/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2261
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2261/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2261/comments
https://api.github.com/repos/huggingface/datasets/issues/2261/events
https://github.com/huggingface/datasets/pull/2261
867,088,818
MDExOlB1bGxSZXF1ZXN0NjIyODIxNzQw
2,261
Improve ReadInstruction logic and update docs
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-25T19:07:26
2021-05-17T18:24:44
2021-05-17T16:48:57
CONTRIBUTOR
null
Improve ReadInstruction logic and docs.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2261/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2261/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2261", "html_url": "https://github.com/huggingface/datasets/pull/2261", "diff_url": "https://github.com/huggingface/datasets/pull/2261.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2261.patch", "merged_at": "2021-05-17T16:48:57" }
true
https://api.github.com/repos/huggingface/datasets/issues/2260
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2260/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2260/comments
https://api.github.com/repos/huggingface/datasets/issues/2260/events
https://github.com/huggingface/datasets/pull/2260
866,961,697
MDExOlB1bGxSZXF1ZXN0NjIyNzMwODYx
2,260
GooAQ dataset added
{ "login": "bhavitvyamalik", "id": 19718818, "node_id": "MDQ6VXNlcjE5NzE4ODE4", "avatar_url": "https://avatars.githubusercontent.com/u/19718818?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bhavitvyamalik", "html_url": "https://github.com/bhavitvyamalik", "followers_url": "https://api.github.com/users/bhavitvyamalik/followers", "following_url": "https://api.github.com/users/bhavitvyamalik/following{/other_user}", "gists_url": "https://api.github.com/users/bhavitvyamalik/gists{/gist_id}", "starred_url": "https://api.github.com/users/bhavitvyamalik/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhavitvyamalik/subscriptions", "organizations_url": "https://api.github.com/users/bhavitvyamalik/orgs", "repos_url": "https://api.github.com/users/bhavitvyamalik/repos", "events_url": "https://api.github.com/users/bhavitvyamalik/events{/privacy}", "received_events_url": "https://api.github.com/users/bhavitvyamalik/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-25T09:26:48
2021-05-07T08:36:17
2021-05-07T08:36:17
CONTRIBUTOR
null
@lhoestq here the dataset is stored with Git LFS. Should I add option for manual downloading of dataset using `git lfs pull` post repo cloning or can we accommodate this in the current `download_and_extract`?
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2260/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2260/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2260", "html_url": "https://github.com/huggingface/datasets/pull/2260", "diff_url": "https://github.com/huggingface/datasets/pull/2260.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2260.patch", "merged_at": "2021-05-07T08:36:17" }
true
https://api.github.com/repos/huggingface/datasets/issues/2259
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2259/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2259/comments
https://api.github.com/repos/huggingface/datasets/issues/2259/events
https://github.com/huggingface/datasets/pull/2259
866,880,092
MDExOlB1bGxSZXF1ZXN0NjIyNjc2ODA0
2,259
Add support for Split.ALL
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-25T01:45:42
2021-06-28T08:21:27
2021-06-28T08:21:27
CONTRIBUTOR
null
The title says it all.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2259/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2259/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2259", "html_url": "https://github.com/huggingface/datasets/pull/2259", "diff_url": "https://github.com/huggingface/datasets/pull/2259.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2259.patch", "merged_at": "2021-06-28T08:21:27" }
true
https://api.github.com/repos/huggingface/datasets/issues/2258
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2258/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2258/comments
https://api.github.com/repos/huggingface/datasets/issues/2258/events
https://github.com/huggingface/datasets/pull/2258
866,870,588
MDExOlB1bGxSZXF1ZXN0NjIyNjcxNTQy
2,258
Fix incorrect update_metadata_with_features calls in ArrowDataset
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-25T00:48:38
2021-04-26T17:16:30
2021-04-26T16:54:04
CONTRIBUTOR
null
Fixes bugs in the `unpdate_metadata_with_features` calls (caused by changes in #2151)
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2258/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2258/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2258", "html_url": "https://github.com/huggingface/datasets/pull/2258", "diff_url": "https://github.com/huggingface/datasets/pull/2258.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2258.patch", "merged_at": "2021-04-26T16:54:04" }
true
https://api.github.com/repos/huggingface/datasets/issues/2257
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2257/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2257/comments
https://api.github.com/repos/huggingface/datasets/issues/2257/events
https://github.com/huggingface/datasets/pull/2257
866,755,203
MDExOlB1bGxSZXF1ZXN0NjIyNTkwMDQw
2,257
added metrics for CUAD
{ "login": "bhavitvyamalik", "id": 19718818, "node_id": "MDQ6VXNlcjE5NzE4ODE4", "avatar_url": "https://avatars.githubusercontent.com/u/19718818?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bhavitvyamalik", "html_url": "https://github.com/bhavitvyamalik", "followers_url": "https://api.github.com/users/bhavitvyamalik/followers", "following_url": "https://api.github.com/users/bhavitvyamalik/following{/other_user}", "gists_url": "https://api.github.com/users/bhavitvyamalik/gists{/gist_id}", "starred_url": "https://api.github.com/users/bhavitvyamalik/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhavitvyamalik/subscriptions", "organizations_url": "https://api.github.com/users/bhavitvyamalik/orgs", "repos_url": "https://api.github.com/users/bhavitvyamalik/repos", "events_url": "https://api.github.com/users/bhavitvyamalik/events{/privacy}", "received_events_url": "https://api.github.com/users/bhavitvyamalik/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-24T14:09:54
2021-04-29T09:53:38
2021-04-27T16:16:32
CONTRIBUTOR
null
For now I've added F1, AUPR, Precision at 80% recall, and Precision at 90%. Last 3 metrics were reported in the [paper](https://arxiv.org/pdf/2103.06268.pdf). Please let me know if we require `exact_match` metric too here
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2257/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2257/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2257", "html_url": "https://github.com/huggingface/datasets/pull/2257", "diff_url": "https://github.com/huggingface/datasets/pull/2257.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2257.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/2256
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2256/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2256/comments
https://api.github.com/repos/huggingface/datasets/issues/2256/events
https://github.com/huggingface/datasets/issues/2256
866,708,609
MDU6SXNzdWU4NjY3MDg2MDk=
2,256
Running `datase.map` with `num_proc > 1` uses a lot of memory
{ "login": "roskoN", "id": 8143425, "node_id": "MDQ6VXNlcjgxNDM0MjU=", "avatar_url": "https://avatars.githubusercontent.com/u/8143425?v=4", "gravatar_id": "", "url": "https://api.github.com/users/roskoN", "html_url": "https://github.com/roskoN", "followers_url": "https://api.github.com/users/roskoN/followers", "following_url": "https://api.github.com/users/roskoN/following{/other_user}", "gists_url": "https://api.github.com/users/roskoN/gists{/gist_id}", "starred_url": "https://api.github.com/users/roskoN/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/roskoN/subscriptions", "organizations_url": "https://api.github.com/users/roskoN/orgs", "repos_url": "https://api.github.com/users/roskoN/repos", "events_url": "https://api.github.com/users/roskoN/events{/privacy}", "received_events_url": "https://api.github.com/users/roskoN/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for reporting ! We are working on this and we'll do a patch release very soon.", "We did a patch release to fix this issue.\r\nIt should be fixed in the new version 1.6.1\r\n\r\nThanks again for reporting and for the details :)" ]
2021-04-24T09:56:20
2021-04-26T17:12:15
2021-04-26T17:12:15
NONE
null
## Describe the bug Running `datase.map` with `num_proc > 1` leads to a tremendous memory usage that requires swapping on disk and it becomes very slow. ## Steps to reproduce the bug ```python from datasets import load_dataset dstc8_datset = load_dataset("roskoN/dstc8-reddit-corpus", keep_in_memory=False) def _prepare_sample(batch): return {"input_ids": list(), "attention_mask": list()} for split_name, dataset_split in list(dstc8_datset.items()): print(f"Processing {split_name}") encoded_dataset_split = dataset_split.map( function=_prepare_sample, batched=True, num_proc=4, remove_columns=dataset_split.column_names, batch_size=10, writer_batch_size=10, keep_in_memory=False, ) print(encoded_dataset_split) path = f"./data/encoded_{split_name}" encoded_dataset_split.save_to_disk(path) ``` ## Expected results Memory usage should stay within reasonable boundaries. ## Actual results This is htop-output from running the provided script. ![image](https://user-images.githubusercontent.com/8143425/115954836-66954980-a4f3-11eb-8340-0153bdc3a475.png) ## Versions ``` - Datasets: 1.6.0 - Python: 3.8.8 (default, Apr 13 2021, 19:58:26) [GCC 7.3.0] - Platform: Linux-4.19.128-microsoft-standard-x86_64-with-glibc2.10 ``` Running on WSL2
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2256/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2256/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2255
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2255/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2255/comments
https://api.github.com/repos/huggingface/datasets/issues/2255/events
https://github.com/huggingface/datasets/pull/2255
866,242,892
MDExOlB1bGxSZXF1ZXN0NjIyMTc0Njg4
2,255
Task casting for text classification & question answering
{ "login": "SBrandeis", "id": 33657802, "node_id": "MDQ6VXNlcjMzNjU3ODAy", "avatar_url": "https://avatars.githubusercontent.com/u/33657802?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SBrandeis", "html_url": "https://github.com/SBrandeis", "followers_url": "https://api.github.com/users/SBrandeis/followers", "following_url": "https://api.github.com/users/SBrandeis/following{/other_user}", "gists_url": "https://api.github.com/users/SBrandeis/gists{/gist_id}", "starred_url": "https://api.github.com/users/SBrandeis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SBrandeis/subscriptions", "organizations_url": "https://api.github.com/users/SBrandeis/orgs", "repos_url": "https://api.github.com/users/SBrandeis/repos", "events_url": "https://api.github.com/users/SBrandeis/events{/privacy}", "received_events_url": "https://api.github.com/users/SBrandeis/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-23T16:00:41
2021-05-18T13:31:36
2021-05-18T13:31:35
CONTRIBUTOR
null
This PR implements task preparation for a given task, in the continuation of #2143 Task taxonomy follows 🤗 Transformers's pipelines taxonomy: https://github.com/huggingface/transformers/tree/master/src/transformers/pipelines Edit by @lewtun: This PR implements support for the following tasks: * `text-classification` * `question-answering` The intended usage is as follows: ```python # Load a dataset with default column names / features ds = load_dataset("dataset_name") # Cast column names / features to schema. Casting is defined in the dataset's `DatasetInfo` ds = ds.prepare_for_task(task="text-classification") # Casting can also be realised during load ds = load_dataset("dataset_name", task="text-classification") # We can also combine shared tasks across dataset concatenation ds1 = load_dataset("dataset_name_1", task="text-classification") ds2 = load_dataset("dataset_name_2", task="text-classification") # If the tasks have the same schema, so will `ds_concat` ds_concat = concatenate_datasets([ds1, ds2]) ``` Note that the current implementation assumes that `DatasetInfo.task_templates` has been pre-defined by the user / contributor when overriding the `MyDataset(GeneratorBasedBuilder)._info` function. As pointed out by @SBrandeis, for evaluation we'll need a way to detect which datasets are already have a compatible schema so we don't have to edit hundreds of dataset scripts. One possibility is to check if the schema features are a subset of the dataset ones, e.g. ```python squad = load_dataset("./datasets/squad", split="train") qa = QuestionAnswering() schema = Features({**qa.input_schema, **qa.label_schema}) assert all(item in squad.features.items() for item in schema.items()) ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2255/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2255/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2255", "html_url": "https://github.com/huggingface/datasets/pull/2255", "diff_url": "https://github.com/huggingface/datasets/pull/2255.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2255.patch", "merged_at": "2021-05-18T13:31:35" }
true
https://api.github.com/repos/huggingface/datasets/issues/2254
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2254/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2254/comments
https://api.github.com/repos/huggingface/datasets/issues/2254/events
https://github.com/huggingface/datasets/pull/2254
866,169,312
MDExOlB1bGxSZXF1ZXN0NjIyMTE1NDI0
2,254
Update format, fingerprint and indices after add_item
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-23T14:31:49
2021-04-27T16:30:49
2021-04-27T16:30:48
MEMBER
null
Added fingerprint and format update wrappers + update the indices by adding the index of the newly added item in the table.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2254/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2254/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2254", "html_url": "https://github.com/huggingface/datasets/pull/2254", "diff_url": "https://github.com/huggingface/datasets/pull/2254.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2254.patch", "merged_at": "2021-04-27T16:30:48" }
true
https://api.github.com/repos/huggingface/datasets/issues/2253
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2253/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2253/comments
https://api.github.com/repos/huggingface/datasets/issues/2253/events
https://github.com/huggingface/datasets/pull/2253
866,034,321
MDExOlB1bGxSZXF1ZXN0NjIyMDA2Njg3
2,253
Perform minor refactoring: use config
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 2851292821, "node_id": "MDU6TGFiZWwyODUxMjkyODIx", "url": "https://api.github.com/repos/huggingface/datasets/labels/refactoring", "name": "refactoring", "color": "B67A40", "default": false, "description": "Restructuring existing code without changing its external behavior" } ]
closed
false
null
[]
null
[]
2021-04-23T11:45:47
2021-05-27T09:12:45
2021-04-27T15:02:59
MEMBER
null
Perform minor refactoring related to `config`.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2253/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2253/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2253", "html_url": "https://github.com/huggingface/datasets/pull/2253", "diff_url": "https://github.com/huggingface/datasets/pull/2253.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2253.patch", "merged_at": "2021-04-27T15:02:58" }
true
https://api.github.com/repos/huggingface/datasets/issues/2250
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2250/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2250/comments
https://api.github.com/repos/huggingface/datasets/issues/2250/events
https://github.com/huggingface/datasets/issues/2250
865,402,449
MDU6SXNzdWU4NjU0MDI0NDk=
2,250
some issue in loading local txt file as Dataset for run_mlm.py
{ "login": "alighofrani95", "id": 14968123, "node_id": "MDQ6VXNlcjE0OTY4MTIz", "avatar_url": "https://avatars.githubusercontent.com/u/14968123?v=4", "gravatar_id": "", "url": "https://api.github.com/users/alighofrani95", "html_url": "https://github.com/alighofrani95", "followers_url": "https://api.github.com/users/alighofrani95/followers", "following_url": "https://api.github.com/users/alighofrani95/following{/other_user}", "gists_url": "https://api.github.com/users/alighofrani95/gists{/gist_id}", "starred_url": "https://api.github.com/users/alighofrani95/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alighofrani95/subscriptions", "organizations_url": "https://api.github.com/users/alighofrani95/orgs", "repos_url": "https://api.github.com/users/alighofrani95/repos", "events_url": "https://api.github.com/users/alighofrani95/events{/privacy}", "received_events_url": "https://api.github.com/users/alighofrani95/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi,\r\n\r\n1. try\r\n ```python\r\n dataset = load_dataset(\"text\", data_files={\"train\": [\"a1.txt\", \"b1.txt\"], \"test\": [\"c1.txt\"]})\r\n ```\r\n instead.\r\n\r\n Sadly, I can't reproduce the error on my machine. If the above code doesn't resolve the issue, try to update the library to the \r\n newest version (`pip install datasets --upgrade`).\r\n\r\n2. https://github.com/huggingface/transformers/blob/3ed5e97ba04ce9b24b4a7161ea74572598a4c480/examples/pytorch/language-modeling/run_mlm.py#L258-L259\r\nThis is the original code. You'll have to modify the example source to work with multiple train files. To make it easier, let's say \"|\" will act as a delimiter between files:\r\n ```python\r\n if data_args.train_file is not None:\r\n data_files[\"train\"] = data_args.train_file.split(\"|\") # + .split(\"|\")\r\n ```\r\n Then call the script as follows (**dataset_name must be None**):\r\n ```bash\r\n python run_mlm.py [... other args] --train_file a1.txt|b1.txt\r\n ```", "i meet the same error with datasets 1.11.0, is there any insight about this?" ]
2021-04-22T19:39:13
2022-03-30T08:29:47
2022-03-30T08:29:47
NONE
null
![image](https://user-images.githubusercontent.com/14968123/115773877-18cef300-a3c6-11eb-8e58-a9cbfd1001ec.png) first of all, I tried to load 3 .txt files as a dataset (sure that the directory and permission is OK.), I face with the below error. > FileNotFoundError: [Errno 2] No such file or directory: 'c' by removing one of the training .txt files It's fixed and although if I put all file as training it's ok ![image](https://user-images.githubusercontent.com/14968123/115774207-867b1f00-a3c6-11eb-953b-905cfb112d25.png) ![image](https://user-images.githubusercontent.com/14968123/115774264-9b57b280-a3c6-11eb-9f36-7b109f0e5a31.png) after this, my question is how could I use this defined Dataset for run_mlm.py for from scratch pretraining. by using --train_file path_to_train_file just can use one .txt , .csv or, .json file. I tried to set my defined Dataset as --dataset_name but the below issue occurs. > Traceback (most recent call last): File "/usr/local/lib/python3.7/dist-packages/datasets/load.py", line 336, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/usr/local/lib/python3.7/dist-packages/datasets/utils/file_utils.py", line 291, in cached_path use_auth_token=download_config.use_auth_token, File "/usr/local/lib/python3.7/dist-packages/datasets/utils/file_utils.py", line 621, in get_from_cache raise FileNotFoundError("Couldn't find file at {}".format(url)) FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/huggingface/datasets/master/datasets/dataset/dataset.py > During handling of the above exception, another exception occurred: > Traceback (most recent call last): File "run_mlm.py", line 486, in <module> main() File "run_mlm.py", line 242, in main datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir) File "/usr/local/lib/python3.7/dist-packages/datasets/load.py", line 719, in load_dataset use_auth_token=use_auth_token, File "/usr/local/lib/python3.7/dist-packages/datasets/load.py", line 347, in prepare_module combined_path, github_file_path FileNotFoundError: Couldn't find file locally at dataset/dataset.py, or remotely at https://raw.githubusercontent.com/huggingface/datasets/1.6.0/datasets/dataset/dataset.py. The file is also not present on the master branch on github.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2250/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2250/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2248
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2248/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2248/comments
https://api.github.com/repos/huggingface/datasets/issues/2248/events
https://github.com/huggingface/datasets/pull/2248
864,853,447
MDExOlB1bGxSZXF1ZXN0NjIxMDEyNzg5
2,248
Implement Dataset to JSON
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
null
[]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/3", "html_url": "https://github.com/huggingface/datasets/milestone/3", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/3/labels", "id": 6644287, "node_id": "MDk6TWlsZXN0b25lNjY0NDI4Nw==", "number": 3, "title": "1.7", "description": "Next minor release", "creator": { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, "open_issues": 0, "closed_issues": 3, "state": "closed", "created_at": "2021-04-09T13:16:31", "updated_at": "2021-05-31T16:20:53", "due_on": "2021-05-14T07:00:00", "closed_at": "2021-05-31T16:20:53" }
[]
2021-04-22T11:46:51
2021-04-27T15:29:21
2021-04-27T15:29:20
MEMBER
null
Implement `Dataset.to_json`.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2248/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2248/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2248", "html_url": "https://github.com/huggingface/datasets/pull/2248", "diff_url": "https://github.com/huggingface/datasets/pull/2248.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2248.patch", "merged_at": "2021-04-27T15:29:20" }
true
https://api.github.com/repos/huggingface/datasets/issues/2247
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2247/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2247/comments
https://api.github.com/repos/huggingface/datasets/issues/2247/events
https://github.com/huggingface/datasets/pull/2247
864,817,520
MDExOlB1bGxSZXF1ZXN0NjIwOTgzNzY3
2,247
Implement Dataset from Parquet
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
null
[]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/7", "html_url": "https://github.com/huggingface/datasets/milestone/7", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/7/labels", "id": 6931350, "node_id": "MDk6TWlsZXN0b25lNjkzMTM1MA==", "number": 7, "title": "1.11", "description": "Next minor release", "creator": { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, "open_issues": 0, "closed_issues": 2, "state": "closed", "created_at": "2021-07-09T05:49:00", "updated_at": "2021-09-02T05:34:03", "due_on": "2021-07-30T07:00:00", "closed_at": "2021-09-02T05:34:03" }
[]
2021-04-22T11:01:38
2021-07-26T13:28:52
2021-07-26T13:28:51
MEMBER
null
Implement instantiation of Dataset from Parquet file.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2247/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2247/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2247", "html_url": "https://github.com/huggingface/datasets/pull/2247", "diff_url": "https://github.com/huggingface/datasets/pull/2247.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2247.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/2246
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2246/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2246/comments
https://api.github.com/repos/huggingface/datasets/issues/2246/events
https://github.com/huggingface/datasets/pull/2246
864,220,031
MDExOlB1bGxSZXF1ZXN0NjIwNDg3OTUw
2,246
Faster map w/ input_columns & faster slicing w/ Iterable keys
{ "login": "norabelrose", "id": 39116809, "node_id": "MDQ6VXNlcjM5MTE2ODA5", "avatar_url": "https://avatars.githubusercontent.com/u/39116809?v=4", "gravatar_id": "", "url": "https://api.github.com/users/norabelrose", "html_url": "https://github.com/norabelrose", "followers_url": "https://api.github.com/users/norabelrose/followers", "following_url": "https://api.github.com/users/norabelrose/following{/other_user}", "gists_url": "https://api.github.com/users/norabelrose/gists{/gist_id}", "starred_url": "https://api.github.com/users/norabelrose/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/norabelrose/subscriptions", "organizations_url": "https://api.github.com/users/norabelrose/orgs", "repos_url": "https://api.github.com/users/norabelrose/repos", "events_url": "https://api.github.com/users/norabelrose/events{/privacy}", "received_events_url": "https://api.github.com/users/norabelrose/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-21T19:49:07
2021-04-26T16:13:59
2021-04-26T16:13:59
CONTRIBUTOR
null
@lhoestq Fixes #2193 - `map` now uses `with_format` to only load needed columns in memory when `input_columns` is set - Slicing datasets with Iterables of indices now uses a new `Table.fast_gather` method, implemented with `np.searchsorted`, to find the appropriate batch indices all at once. `pa.concat_tables` is no longer used for this; we just call `pa.Table.from_batches` with a list of all the batch slices. Together these changes have sped up batched `map()` calls over subsets of columns quite considerably in my initial testing.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2246/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2246/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2246", "html_url": "https://github.com/huggingface/datasets/pull/2246", "diff_url": "https://github.com/huggingface/datasets/pull/2246.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2246.patch", "merged_at": "2021-04-26T16:13:58" }
true
https://api.github.com/repos/huggingface/datasets/issues/2245
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2245/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2245/comments
https://api.github.com/repos/huggingface/datasets/issues/2245/events
https://github.com/huggingface/datasets/pull/2245
863,191,655
MDExOlB1bGxSZXF1ZXN0NjE5NjQzMjQ3
2,245
Add `key` type and duplicates verification with hashing
{ "login": "NikhilBartwal", "id": 42388668, "node_id": "MDQ6VXNlcjQyMzg4NjY4", "avatar_url": "https://avatars.githubusercontent.com/u/42388668?v=4", "gravatar_id": "", "url": "https://api.github.com/users/NikhilBartwal", "html_url": "https://github.com/NikhilBartwal", "followers_url": "https://api.github.com/users/NikhilBartwal/followers", "following_url": "https://api.github.com/users/NikhilBartwal/following{/other_user}", "gists_url": "https://api.github.com/users/NikhilBartwal/gists{/gist_id}", "starred_url": "https://api.github.com/users/NikhilBartwal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NikhilBartwal/subscriptions", "organizations_url": "https://api.github.com/users/NikhilBartwal/orgs", "repos_url": "https://api.github.com/users/NikhilBartwal/repos", "events_url": "https://api.github.com/users/NikhilBartwal/events{/privacy}", "received_events_url": "https://api.github.com/users/NikhilBartwal/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-20T20:03:19
2021-05-10T18:04:37
2021-05-10T17:31:22
CONTRIBUTOR
null
Closes #2230 There is currently no verification for the data type and the uniqueness of the keys yielded by the `dataset_builder`. This PR is currently a work in progress with the following goals: - [x] Adding `hash_salt` to `ArrowWriter` so that the keys belonging to different splits have different hash - [x] Add `key` arrtibute to `ArrowWriter.write()` for hashing - [x] Add a hashing class which takes an input key of certain type (`str`/`int`/anything convertible to string) and produces a 128-bit hash using `hashlib.md5` - [x] Creating a function giving a custom error message when non-unique keys are found **[This will take care of type-checking for keys]** - [x] Checking for duplicate keys in `writer.write()` for each batch [**NOTE**: This PR is currently concerned with `GeneratorBasedBuilder` only, for simplification. A subsequent PR will be made in future for `ArrowBasedBuilder`] @lhoestq Thank you for the feedback. It would be great to have your guidance on this!
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2245/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2245/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2245", "html_url": "https://github.com/huggingface/datasets/pull/2245", "diff_url": "https://github.com/huggingface/datasets/pull/2245.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2245.patch", "merged_at": "2021-05-10T17:31:21" }
true
https://api.github.com/repos/huggingface/datasets/issues/2243
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2243/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2243/comments
https://api.github.com/repos/huggingface/datasets/issues/2243/events
https://github.com/huggingface/datasets/issues/2243
862,909,389
MDU6SXNzdWU4NjI5MDkzODk=
2,243
Map is slow and processes batches one after another
{ "login": "villmow", "id": 2743060, "node_id": "MDQ6VXNlcjI3NDMwNjA=", "avatar_url": "https://avatars.githubusercontent.com/u/2743060?v=4", "gravatar_id": "", "url": "https://api.github.com/users/villmow", "html_url": "https://github.com/villmow", "followers_url": "https://api.github.com/users/villmow/followers", "following_url": "https://api.github.com/users/villmow/following{/other_user}", "gists_url": "https://api.github.com/users/villmow/gists{/gist_id}", "starred_url": "https://api.github.com/users/villmow/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/villmow/subscriptions", "organizations_url": "https://api.github.com/users/villmow/orgs", "repos_url": "https://api.github.com/users/villmow/repos", "events_url": "https://api.github.com/users/villmow/events{/privacy}", "received_events_url": "https://api.github.com/users/villmow/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi @villmow, thanks for reporting.\r\n\r\nCould you please try with the Datasets version 1.6? We released it yesterday and it fixes some issues about the processing speed. You can see the fix implemented by @lhoestq here: #2122.\r\n\r\nOnce you update Datasets, please confirm if the problem persists.", "Hi @albertvillanova, thanks for the reply. I just tried the new version and the problem still persists. \r\n\r\nDo I need to rebuild the saved dataset (which I load from disk) with the 1.6.0 version of datasets? My script loads this dataset and creates new datasets from it. I tried it without rebuilding.\r\n\r\nSee this short video of what happens. It does not create all processes at the same time:\r\n\r\nhttps://user-images.githubusercontent.com/2743060/115720139-0da3a500-a37d-11eb-833a-9bbacc70868d.mp4\r\n\r\n", "There can be a bit of delay between the creations of the processes but this delay should be the same for both your `map` calls. We should look into this.\r\nAlso if you hav some code that reproduces this issue on google colab that'd be really useful !\r\n\r\nRegarding the speed differences:\r\nThis looks like a similar issue as https://github.com/huggingface/datasets/issues/1992 who is experiencing the same speed differences between processes.\r\nThis is a known bug that we are investigating. As of now I've never managed to reproduce it on my machine so it's pretty hard for me to find where this issue comes from.\r\n", "Upgrade to 1.6.1 solved my problem somehow. I did not change any of my code, but now it starts all processes around the same time.", "Nice ! I'm glad this works now.\r\nClosing for now, but feel free to re-open if you experience this issue again." ]
2021-04-20T14:58:20
2021-05-03T17:54:33
2021-05-03T17:54:32
NONE
null
## Describe the bug I have a somewhat unclear bug to me, where I can't figure out what the problem is. The code works as expected on a small subset of my dataset (2000 samples) on my local machine, but when I execute the same code with a larger dataset (1.4 million samples) this problem occurs. Thats why I can't give exact steps to reproduce, I'm sorry. I process a large dataset in a two step process. I first call map on a dataset I load from disk and create a new dataset from it. This works like expected and `map` uses all workers I started it with. Then I process the dataset created by the first step, again with `map`, which is really slow and starting only one or two process at a time. Number of processes is the same for both steps. pseudo code: ```python ds = datasets.load_from_disk("path") new_dataset = ds.map(work, batched=True, ...) # fast uses all processes final_dataset = new_dataset.map(work2, batched=True, ...) # slow starts one process after another ``` ## Expected results Second stage should be as fast as the first stage. ## Versions Paste the output of the following code: - Datasets: 1.5.0 - Python: 3.8.8 (default, Feb 24 2021, 21:46:12) - Platform: Linux-5.4.0-60-generic-x86_64-with-glibc2.10 Do you guys have any idea? Thanks a lot!
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2243/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2243/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2242
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2242/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2242/comments
https://api.github.com/repos/huggingface/datasets/issues/2242/events
https://github.com/huggingface/datasets/issues/2242
862,870,205
MDU6SXNzdWU4NjI4NzAyMDU=
2,242
Link to datasets viwer on Quick Tour page returns "502 Bad Gateway"
{ "login": "martavillegas", "id": 6735707, "node_id": "MDQ6VXNlcjY3MzU3MDc=", "avatar_url": "https://avatars.githubusercontent.com/u/6735707?v=4", "gravatar_id": "", "url": "https://api.github.com/users/martavillegas", "html_url": "https://github.com/martavillegas", "followers_url": "https://api.github.com/users/martavillegas/followers", "following_url": "https://api.github.com/users/martavillegas/following{/other_user}", "gists_url": "https://api.github.com/users/martavillegas/gists{/gist_id}", "starred_url": "https://api.github.com/users/martavillegas/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/martavillegas/subscriptions", "organizations_url": "https://api.github.com/users/martavillegas/orgs", "repos_url": "https://api.github.com/users/martavillegas/repos", "events_url": "https://api.github.com/users/martavillegas/events{/privacy}", "received_events_url": "https://api.github.com/users/martavillegas/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "This should be fixed now!\r\n\r\ncc @srush " ]
2021-04-20T14:19:51
2021-04-20T15:02:45
2021-04-20T15:02:45
NONE
null
Link to datasets viwer (https://huggingface.co/datasets/viewer/) on Quick Tour page (https://huggingface.co/docs/datasets/quicktour.html) returns "502 Bad Gateway" The same error with https://huggingface.co/datasets/viewer/?dataset=glue&config=mrpc
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2242/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2242/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2241
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2241/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2241/comments
https://api.github.com/repos/huggingface/datasets/issues/2241/events
https://github.com/huggingface/datasets/pull/2241
862,696,460
MDExOlB1bGxSZXF1ZXN0NjE5MjI0MzIw
2,241
Add SLR32 to OpenSLR
{ "login": "cahya-wirawan", "id": 7669893, "node_id": "MDQ6VXNlcjc2Njk4OTM=", "avatar_url": "https://avatars.githubusercontent.com/u/7669893?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cahya-wirawan", "html_url": "https://github.com/cahya-wirawan", "followers_url": "https://api.github.com/users/cahya-wirawan/followers", "following_url": "https://api.github.com/users/cahya-wirawan/following{/other_user}", "gists_url": "https://api.github.com/users/cahya-wirawan/gists{/gist_id}", "starred_url": "https://api.github.com/users/cahya-wirawan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cahya-wirawan/subscriptions", "organizations_url": "https://api.github.com/users/cahya-wirawan/orgs", "repos_url": "https://api.github.com/users/cahya-wirawan/repos", "events_url": "https://api.github.com/users/cahya-wirawan/events{/privacy}", "received_events_url": "https://api.github.com/users/cahya-wirawan/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-20T11:02:45
2021-04-23T16:21:24
2021-04-23T15:36:15
CONTRIBUTOR
null
I would like to add SLR32 to OpenSLR. It contains four South African languages: Afrikaans, Sesotho, Setswana and isiXhosa
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2241/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2241/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2241", "html_url": "https://github.com/huggingface/datasets/pull/2241", "diff_url": "https://github.com/huggingface/datasets/pull/2241.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2241.patch", "merged_at": "2021-04-23T15:36:15" }
true
https://api.github.com/repos/huggingface/datasets/issues/2240
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2240/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2240/comments
https://api.github.com/repos/huggingface/datasets/issues/2240/events
https://github.com/huggingface/datasets/pull/2240
862,537,856
MDExOlB1bGxSZXF1ZXN0NjE5MDkyODc5
2,240
Clarify how to load wikihow
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-20T08:02:58
2021-04-21T09:54:57
2021-04-21T09:54:57
MEMBER
null
Explain clearer how to load the dataset in the manual download instructions. En relation with #2239.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2240/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2240/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2240", "html_url": "https://github.com/huggingface/datasets/pull/2240", "diff_url": "https://github.com/huggingface/datasets/pull/2240.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2240.patch", "merged_at": "2021-04-21T09:54:57" }
true
https://api.github.com/repos/huggingface/datasets/issues/2239
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2239/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2239/comments
https://api.github.com/repos/huggingface/datasets/issues/2239/events
https://github.com/huggingface/datasets/issues/2239
861,904,306
MDU6SXNzdWU4NjE5MDQzMDY=
2,239
Error loading wikihow dataset
{ "login": "odellus", "id": 4686956, "node_id": "MDQ6VXNlcjQ2ODY5NTY=", "avatar_url": "https://avatars.githubusercontent.com/u/4686956?v=4", "gravatar_id": "", "url": "https://api.github.com/users/odellus", "html_url": "https://github.com/odellus", "followers_url": "https://api.github.com/users/odellus/followers", "following_url": "https://api.github.com/users/odellus/following{/other_user}", "gists_url": "https://api.github.com/users/odellus/gists{/gist_id}", "starred_url": "https://api.github.com/users/odellus/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/odellus/subscriptions", "organizations_url": "https://api.github.com/users/odellus/orgs", "repos_url": "https://api.github.com/users/odellus/repos", "events_url": "https://api.github.com/users/odellus/events{/privacy}", "received_events_url": "https://api.github.com/users/odellus/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi @odellus, thanks for reporting.\r\n\r\nThe `wikihow` dataset has 2 versions:\r\n- `all`: Consisting of the concatenation of all paragraphs as the articles and the bold lines as the reference summaries.\r\n- `sep`: Consisting of each paragraph and its summary.\r\n\r\nTherefore, in order to load it, you have to specify which version you would like, for example:\r\n```python\r\ndataset = load_dataset('wikihow', 'all')\r\n```\r\n\r\nPlease, tell me if this solves your problem.", "Good call out. I did try that and that's when it told me to download the\ndataset. Don't believe I have tried it with local files. Will try first\nthing in the morning and get back to you.\n\nOn Mon, Apr 19, 2021, 11:17 PM Albert Villanova del Moral <\n***@***.***> wrote:\n\n> Hi @odellus <https://github.com/odellus>, thanks for reporting.\n>\n> The wikihow dataset has 2 versions:\n>\n> - all: Consisting of the concatenation of all paragraphs as the\n> articles and the bold lines as the reference summaries.\n> - sep: Consisting of each paragraph and its summary.\n>\n> Therefore, in order to load it, you have to specify which version you\n> would like, for example:\n>\n> dataset = load_dataset('wikihow', 'all')\n>\n> Please, tell me if this solves your problem.\n>\n> —\n> You are receiving this because you were mentioned.\n> Reply to this email directly, view it on GitHub\n> <https://github.com/huggingface/datasets/issues/2239#issuecomment-823004146>,\n> or unsubscribe\n> <https://github.com/notifications/unsubscribe-auth/ABDYI3HVRTBI2QT3BOG262DTJUL57ANCNFSM43GV5BZQ>\n> .\n>\n", "Hi @odellus, yes you are right.\r\n\r\nDue to the server where the `wikihow` dataset is hosted, the dataset can't be downloaded automatically by `huggingface` and you have to download it manually as you did.\r\n\r\nNevertheless, you have to specify which dataset version you would like to load anyway:\r\n```python\r\ndataset = load_dataset('wikihow', 'all', data_dir='./wikihow')\r\n```\r\nor\r\n```python\r\ndataset = load_dataset('wikihow', 'sep', data_dir='./wikihow')\r\n```\r\nI find that the instructions given by `huggingface` are not clear enough: I am going to fix this.\r\nPlease tell me if this eventually works for you.", "That was it. Thank you Albert!" ]
2021-04-19T21:02:31
2021-04-20T16:33:11
2021-04-20T16:33:11
CONTRIBUTOR
null
## Describe the bug When attempting to load wikihow into a dataset with ```python from datasets import load_dataset dataset = load_dataset('wikihow', data_dir='./wikihow') ``` I get the message: ``` AttributeError: 'BuilderConfig' object has no attribute 'filename' ``` at the end of a [full stack trace](https://gist.github.com/odellus/602c3b2de52f541d353b1022f320ffc2). ## Steps to reproduce the bug I have followed the instructions for creating a wikihow dataset. The [wikihow dataset site](https://huggingface.co/datasets/wikihow) says to use ```python from datasets import load_dataset dataset = load_dataset('wikihow') ``` to load the dataset. I do so and I get the message ``` AssertionError: The dataset wikihow with config all requires manual data. Please follow the manual download instructions: You need to manually download two wikihow files. An overview of which files to download can be seen at https://github.com/mahnazkoupaee/WikiHow-Dataset. You need to download the following two files manually: 1) https://ucsb.app.box.com/s/ap23l8gafpezf4tq3wapr6u8241zz358 and save the file under <path/to/folder>/wikihowAll.csv 2) https://ucsb.app.box.com/s/7yq601ijl1lzvlfu4rjdbbxforzd2oag and save the file under <path/to/folder>/wikihowSep.csv The <path/to/folder> can e.g. be "~/manual_wikihow_data". Wikihow can then be loaded using the following command `datasets.load_dataset("wikihow", data_dir="<path/to/folder>")`. . Manual data can be loaded with `datasets.load_dataset(wikihow, data_dir='<path/to/manual/data>') ``` So I create a directory `./wikihow` and download `wikihowAll.csv` and `wikihowSep.csv` into the new directory. Then I run ```python from datasets import load_dataset dataset = load_dataset('wikihow', data_dir='./wikihow') ``` that's when I get the [stack trace](https://gist.github.com/odellus/602c3b2de52f541d353b1022f320ffc2) ## Expected results I expected it to load the downloaded files into a dataset. ## Actual results ```python Using custom data configuration default-data_dir=.%2Fwikihow Downloading and preparing dataset wikihow/default (download: Unknown size, generated: Unknown size, post-processed: Unknown size, total: Unknown size) to /home/azureuser/.cache/huggingface/datasets/wikihow/default-data_dir=.%2Fwikihow/0.0.0/58f42f8f0e4d459811a0f69aaab35870093830ccd58006769e7e1eb3e0e686c2... --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-9-5e4d40142f30> in <module> ----> 1 dataset = load_dataset('wikihow',data_dir='./wikihow') ~/.local/lib/python3.6/site-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, script_version, use_auth_token, **config_kwargs) 745 try_from_hf_gcs=try_from_hf_gcs, 746 base_path=base_path,--> 747 use_auth_token=use_auth_token, 748 ) 749 ~/.local/lib/python3.6/site-packages/datasets/builder.py in download_and_prepare(self, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, **download_and_prepare_kwargs) 577 if not downloaded_from_gcs: 578 self._download_and_prepare( --> 579 dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs 580 ) 581 # Sync info ~/.local/lib/python3.6/site-packages/datasets/builder.py in _download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs) 632 split_dict = SplitDict(dataset_name=self.name) 633 split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs) --> 634 split_generators = self._split_generators(dl_manager, **split_generators_kwargs) 635 636 # Checksums verification ~/.cache/huggingface/modules/datasets_modules/datasets/wikihow/58f42f8f0e4d459811a0f69aaab35870093830ccd58006769e7e1eb3e0e686c2/wikihow.py in _split_generators(self, dl_manager) 132 133 path_to_manual_file = os.path.join( --> 134 os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), self.config.filename 135 ) 136 AttributeError: 'BuilderConfig' object has no attribute 'filename' ``` ## Versions Paste the output of the following code: ```python import datasets import sys import platform print(f""" - Datasets: {datasets.__version__} - Python: {sys.version} - Platform: {platform.platform()} """) ``` ``` - Datasets: 1.5.0 - Python: 3.6.9 (default, Jan 26 2021, 15:33:00) [GCC 8.4.0] - Platform: Linux-5.4.0-1046-azure-x86_64-with-Ubuntu-18.04-bionic ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2239/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2239/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2238
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2238/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2238/comments
https://api.github.com/repos/huggingface/datasets/issues/2238/events
https://github.com/huggingface/datasets/pull/2238
861,518,291
MDExOlB1bGxSZXF1ZXN0NjE4MTY5NzM5
2,238
NLU evaluation data
{ "login": "dkajtoch", "id": 32985207, "node_id": "MDQ6VXNlcjMyOTg1MjA3", "avatar_url": "https://avatars.githubusercontent.com/u/32985207?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dkajtoch", "html_url": "https://github.com/dkajtoch", "followers_url": "https://api.github.com/users/dkajtoch/followers", "following_url": "https://api.github.com/users/dkajtoch/following{/other_user}", "gists_url": "https://api.github.com/users/dkajtoch/gists{/gist_id}", "starred_url": "https://api.github.com/users/dkajtoch/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dkajtoch/subscriptions", "organizations_url": "https://api.github.com/users/dkajtoch/orgs", "repos_url": "https://api.github.com/users/dkajtoch/repos", "events_url": "https://api.github.com/users/dkajtoch/events{/privacy}", "received_events_url": "https://api.github.com/users/dkajtoch/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-19T16:47:20
2021-04-23T15:32:05
2021-04-23T15:32:05
CONTRIBUTOR
null
New intent classification dataset from https://github.com/xliuhw/NLU-Evaluation-Data
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2238/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2238/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2238", "html_url": "https://github.com/huggingface/datasets/pull/2238", "diff_url": "https://github.com/huggingface/datasets/pull/2238.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2238.patch", "merged_at": "2021-04-23T15:32:05" }
true
https://api.github.com/repos/huggingface/datasets/issues/2235
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2235/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2235/comments
https://api.github.com/repos/huggingface/datasets/issues/2235/events
https://github.com/huggingface/datasets/pull/2235
861,040,716
MDExOlB1bGxSZXF1ZXN0NjE3Nzc0NDUw
2,235
Update README.md
{ "login": "PierreColombo", "id": 22492839, "node_id": "MDQ6VXNlcjIyNDkyODM5", "avatar_url": "https://avatars.githubusercontent.com/u/22492839?v=4", "gravatar_id": "", "url": "https://api.github.com/users/PierreColombo", "html_url": "https://github.com/PierreColombo", "followers_url": "https://api.github.com/users/PierreColombo/followers", "following_url": "https://api.github.com/users/PierreColombo/following{/other_user}", "gists_url": "https://api.github.com/users/PierreColombo/gists{/gist_id}", "starred_url": "https://api.github.com/users/PierreColombo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/PierreColombo/subscriptions", "organizations_url": "https://api.github.com/users/PierreColombo/orgs", "repos_url": "https://api.github.com/users/PierreColombo/repos", "events_url": "https://api.github.com/users/PierreColombo/events{/privacy}", "received_events_url": "https://api.github.com/users/PierreColombo/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-19T08:21:02
2021-04-19T12:49:19
2021-04-19T12:49:19
CONTRIBUTOR
null
Adding relevant citations (paper accepted at AAAI 2020 & EMNLP 2020) to the benchmark
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2235/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2235/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2235", "html_url": "https://github.com/huggingface/datasets/pull/2235", "diff_url": "https://github.com/huggingface/datasets/pull/2235.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2235.patch", "merged_at": "2021-04-19T12:49:19" }
true
https://api.github.com/repos/huggingface/datasets/issues/2234
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2234/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2234/comments
https://api.github.com/repos/huggingface/datasets/issues/2234/events
https://github.com/huggingface/datasets/pull/2234
860,442,246
MDExOlB1bGxSZXF1ZXN0NjE3MzI4NDU3
2,234
Fix bash snippet formatting in ADD_NEW_DATASET.md
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-17T16:01:08
2021-04-19T10:57:31
2021-04-19T07:51:36
CONTRIBUTOR
null
This PR indents the paragraphs around the bash snippets in ADD_NEW_DATASET.md to fix formatting.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2234/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2234/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2234", "html_url": "https://github.com/huggingface/datasets/pull/2234", "diff_url": "https://github.com/huggingface/datasets/pull/2234.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2234.patch", "merged_at": "2021-04-19T07:51:36" }
true
https://api.github.com/repos/huggingface/datasets/issues/2233
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2233/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2233/comments
https://api.github.com/repos/huggingface/datasets/issues/2233/events
https://github.com/huggingface/datasets/pull/2233
860,097,084
MDExOlB1bGxSZXF1ZXN0NjE3MDYwMTkw
2,233
Fix `xnli` dataset tuple key
{ "login": "NikhilBartwal", "id": 42388668, "node_id": "MDQ6VXNlcjQyMzg4NjY4", "avatar_url": "https://avatars.githubusercontent.com/u/42388668?v=4", "gravatar_id": "", "url": "https://api.github.com/users/NikhilBartwal", "html_url": "https://github.com/NikhilBartwal", "followers_url": "https://api.github.com/users/NikhilBartwal/followers", "following_url": "https://api.github.com/users/NikhilBartwal/following{/other_user}", "gists_url": "https://api.github.com/users/NikhilBartwal/gists{/gist_id}", "starred_url": "https://api.github.com/users/NikhilBartwal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NikhilBartwal/subscriptions", "organizations_url": "https://api.github.com/users/NikhilBartwal/orgs", "repos_url": "https://api.github.com/users/NikhilBartwal/repos", "events_url": "https://api.github.com/users/NikhilBartwal/events{/privacy}", "received_events_url": "https://api.github.com/users/NikhilBartwal/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-16T19:12:42
2021-04-19T08:56:42
2021-04-19T08:56:42
CONTRIBUTOR
null
Closes #2229 The `xnli` dataset yields a tuple key in case of `ar` which is inconsistant with the acceptable key types (str/int). The key was thus ported to `str` keeping the original information intact.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2233/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2233/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2233", "html_url": "https://github.com/huggingface/datasets/pull/2233", "diff_url": "https://github.com/huggingface/datasets/pull/2233.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2233.patch", "merged_at": "2021-04-19T08:56:42" }
true
https://api.github.com/repos/huggingface/datasets/issues/2232
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2232/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2232/comments
https://api.github.com/repos/huggingface/datasets/issues/2232/events
https://github.com/huggingface/datasets/pull/2232
860,075,931
MDExOlB1bGxSZXF1ZXN0NjE3MDQyNTI4
2,232
Start filling GLUE dataset card
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-16T18:37:37
2021-04-21T09:33:09
2021-04-21T09:33:08
MEMBER
null
The dataset card was pretty much empty. I added the descriptions (mainly from TFDS since the script is the same), and I also added the tasks tags as well as examples for a subset of the tasks. cc @sgugger
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2232/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2232/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2232", "html_url": "https://github.com/huggingface/datasets/pull/2232", "diff_url": "https://github.com/huggingface/datasets/pull/2232.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2232.patch", "merged_at": "2021-04-21T09:33:08" }
true
https://api.github.com/repos/huggingface/datasets/issues/2231
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2231/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2231/comments
https://api.github.com/repos/huggingface/datasets/issues/2231/events
https://github.com/huggingface/datasets/pull/2231
859,850,488
MDExOlB1bGxSZXF1ZXN0NjE2ODYyNTEx
2,231
Fix map when removing columns on a formatted dataset
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-16T14:08:55
2021-04-16T15:10:05
2021-04-16T15:10:04
MEMBER
null
This should fix issue #2226 The `remove_columns` argument was ignored on formatted datasets
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2231/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2231/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2231", "html_url": "https://github.com/huggingface/datasets/pull/2231", "diff_url": "https://github.com/huggingface/datasets/pull/2231.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2231.patch", "merged_at": "2021-04-16T15:10:04" }
true
https://api.github.com/repos/huggingface/datasets/issues/2230
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2230/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2230/comments
https://api.github.com/repos/huggingface/datasets/issues/2230/events
https://github.com/huggingface/datasets/issues/2230
859,817,159
MDU6SXNzdWU4NTk4MTcxNTk=
2,230
Keys yielded while generating dataset are not being checked
{ "login": "NikhilBartwal", "id": 42388668, "node_id": "MDQ6VXNlcjQyMzg4NjY4", "avatar_url": "https://avatars.githubusercontent.com/u/42388668?v=4", "gravatar_id": "", "url": "https://api.github.com/users/NikhilBartwal", "html_url": "https://github.com/NikhilBartwal", "followers_url": "https://api.github.com/users/NikhilBartwal/followers", "following_url": "https://api.github.com/users/NikhilBartwal/following{/other_user}", "gists_url": "https://api.github.com/users/NikhilBartwal/gists{/gist_id}", "starred_url": "https://api.github.com/users/NikhilBartwal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NikhilBartwal/subscriptions", "organizations_url": "https://api.github.com/users/NikhilBartwal/orgs", "repos_url": "https://api.github.com/users/NikhilBartwal/repos", "events_url": "https://api.github.com/users/NikhilBartwal/events{/privacy}", "received_events_url": "https://api.github.com/users/NikhilBartwal/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
null
[]
null
[ "Hi ! Indeed there's no verification on the uniqueness nor the types of the keys.\r\nDo you already have some ideas of what you would like to implement and how ?", "Hey @lhoestq, thank you so much for the opportunity.\r\nAlthough I haven't had much experience with the HF Datasets code, after a careful look at how the `ArrowWriter` functions, I think we can implement this as follows:\r\n\r\n1. First, we would have to update the `ArrowWriter.write()` function here:\r\nhttps://github.com/huggingface/datasets/blob/fcd3c3c8e3b1d9a2f3686a496082e21f06591380/src/datasets/arrow_writer.py#L296\r\nso that it accepts an additional argument `key` which would be appended along with the example here after hashing.\r\n\r\n2. Then, we would need to create a `Hasher` class which will take the key as its input and return a hash for it (We might need to use some hash salt which can be passed to the ArrowWriter.writer() with value equal to the `split_name` for differentiating between same keys of different splits)\r\n\r\n We can use the `hashlib.md5` function for hashing which will conert each key to its byte code before hashing (depending on the data type of the key) **Thus, the `key` type will be verified here**.\r\n\r\n3. Now, we would have to edit this\r\nhttps://github.com/huggingface/datasets/blob/fcd3c3c8e3b1d9a2f3686a496082e21f06591380/src/datasets/arrow_writer.py#L257\r\n so that it iterates over each `(hash, example)` pair (sorted according to hash). We can then simply **check whether each hash is different from the previous hash** (since they will be sorted)\r\n\r\nHowever, since I'm not very familiar with how the data is being written on disk in the form of a table, I might need some guidance for Step 3. \r\nPlease let me know your thought on this. Thanks!", "Interesting !\r\nWe keep the dataset sorted in the order examples are generated by the builder (we expect the dataset builders to generate examples in deterministic order). Therefore I don't think we should shuffle the examples with the hashing. Let me know what you think.\r\nOther that that, I really like the idea of checking for keys duplicates in `write_examples_on_file` :)\r\n\r\nThis looks like a great plan ! Feel free to open a PR and ping me if you have questions or if I can help\r\n", "@lhoestq I'm glad you liked the idea!\r\nI think that since the keys will be unique and deterministic in the nature themselves, so even if we shuffle the examples according to the hash, a deterministic order would still be maintained (as the keys will always have the same hash, whenever the dataset is generated). \r\nAnd since, we are not dealing with time series data (which would require the data to be in original order), I don't think the order of examples would matter much, as long as the order is deterministic and constant for all users.\r\n\r\nI think that this is also what was originally envisioned as mentioned in the documentation here:\r\nhttps://github.com/huggingface/datasets/blob/6775661b19d2ec339784f3d84553a3996a1d86c3/src/datasets/builder.py#L973\r\n\r\nAlso, if we avoid this, we would need to keep track of all the hashed keys in some place and compare each individual key with all others. This can cause some major overhead as each dataset consists of tens of thousands of examples.\r\nLet me know your thoughts in it! I would be opening a PR soon :)", "When users load their own data, they expect the order to stay the same. I think that shuffling the data can make things inconvenient.\r\n\r\n> I think that this is also what was originally envisioned as mentioned in the documentation here:\r\n\r\nThis part was originally developed by tensorflow datasets, and tensorflow datasets indeed does the shuffling. However in this library this is probably not what we want in the general case. But if @albertvillanova and @thomwolf you have opinions on this please let us know.\r\n\r\n> Also, if we avoid this, we would need to keep track of all the hashed keys in some place and compare each individual key with all others. This can cause some major overhead as each dataset consists of tens of thousands of examples.\r\n\r\nMaybe we cam simply keep track of the hashes of of each batch being written ? The size of the batch when the data are save in arrow is 10 000 examples. This would only ensure that we don't have duplicates in each batch, but there might still be duplicates across batches. For 10 000 examples the hashes can just be stored as a python `set`.\r\n\r\nOtherwise if we want full deduplication, we need an extra tool that allows to temporarily save and query hashes that may need to use disk space rather than memory.", "Yes I think we want to keep the original order by default and only shuffle when the user ask for it (for instance by calling `dataset.shuffle()`). That’s how I had it in mind originally.", "Hey @lhoestq, I just had a more in-depth look at the original TFDS code about why the keys and hash were used in the first place.\r\n\r\nIn my opinion, the only use that the `hash(key)` serves is that it allows us to shuffle the examples in a deterministic order (as each example will always yield the same key and thus, the same hash on every system) so that the same dataset is generated for each user, irrespective of the order the examples are yielded by the dataset builder on different user systems.\r\n\r\nOtherwise, if we are not shuffling, then while yielding and writing the data, after getting the key and hashing it for an example, I can't quite see the use of the hash or the key. The hash will simply be generated for each example but not actually used anywhere?\r\n\r\n@lhoestq @thomwolf It would be great if you could explain a bit more about the usage of keys. Thanks!\r\n", "In `datasets` the keys are currently ignored.\r\nFor shuffling we don't use the keys. Instead we shuffle an array of indices. Since both the original order of the dataset and the indices shuffling are deterministic, then `dataset.shuffle` is deterministic as well.\r\nWe can use it to:\r\n1. detect duplicates\r\n2. verify that the generation order is indeed deterministic\r\n3. maybe more ?", "Thanks a lot @lhoestq. I think I understand what we need to do now. The keys can indeed be used for detecting duplicates in generated examples as well as ensuring the order.\r\n\r\n> Maybe we cam simply keep track of the hashes of of each batch being written ? The size of the batch when the data are save in arrow is 10 000 examples. This would only ensure that we don't have duplicates in each batch,\r\n\r\nI think that checking for duplicates in every batch independently would be sufficient as the probability of collisions using something like `MD5` is very low. I would be opening a draft PR soon. It would be great to have your guidance. Thanks!" ]
2021-04-16T13:29:47
2021-05-10T17:31:21
2021-05-10T17:31:21
CONTRIBUTOR
null
The keys used in the dataset generation script to ensure the same order is generated on every user's end should be checked for their types (i.e either `str` or `int`) as well as whether they are unique or not. Currently, the keys are not being checked for any of these, as evident from `xnli' dataset generation: https://github.com/huggingface/datasets/blob/56346791aed417306d054d89bd693d6b7eab17f7/datasets/xnli/xnli.py#L196 Even after having a tuple as key, the dataset is generated without any warning. Also, as tested in the case of `anli` dataset (I tweeked the dataset script to use `1` as a key for every example): ``` >>> import datasets >>> nik = datasets.load_dataset('anli') Downloading and preparing dataset anli/plain_text (download: 17.76 MiB, generated: 73.55 MiB, post-processed: Unknown size, total: 91.31 MiB) to C:\Users\nikhil\.cache\huggingface\datasets\anli\plain_text\0.1.0\43fa2c99c10bf8478f1fa0860f7b122c6b277c4c41306255b7641257cf4e3299... 0 examples [00:00, ? examples/s]1 {'uid': '0fd0abfb-659e-4453-b196-c3a64d2d8267', 'premise': 'The Parma trolleybus system (Italian: "Rete filoviaria di Parma" ) forms part of the public transport network of the city and "comune" of Parma, in the region of Emilia-Romagna, northern Italy. In operation since 1953, the system presently comprises four urban routes.', 'hypothesis': 'The trolleybus system has over 2 urban routes', 'label': 'entailment', 'reason': ''} 2021-04-16 12:38:14.483968: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library cudart64_110.dll 1 examples [00:01, 1.87s/ examples]1 {'uid': '7ed72ff4-40b7-4f8a-b1b9-6c612aa62c84', 'premise': 'Alexandra Lendon Bastedo (9 March 1946 – 12 January 2014) was a British actress, best known for her role as secret agent Sharron Macready in the 1968 British espionage/science fiction adventure series "The Champions". She has been cited as a sex symbol of the 1960s and 1970s. Bastedo was a vegetarian and animal welfare advocate.', 'hypothesis': "Sharron Macready was a popular character through the 1980's.", 'label': 'neutral', 'reason': ''} 1 {'uid': '5d2930a3-62ac-485d-94d7-4e36cbbcd7b5', 'premise': 'Alexandra Lendon Bastedo (9 March 1946 – 12 January 2014) was a British actress, best known for her role as secret agent Sharron Macready in the 1968 British espionage/science fiction adventure series "The Champions". She has been cited as a sex symbol of the 1960s and 1970s. Bastedo was a vegetarian and animal welfare advocate.', 'hypothesis': "Bastedo didn't keep any pets because of her views on animal rights.", 'label': 'neutral', 'reason': ''} 1 {'uid': '324db753-ddc9-4a85-a825-f09e2e5aebdd', 'premise': 'Alexandra Lendon Bastedo (9 March 1946 – 12 January 2014) was a British actress, best known for her role as secret agent Sharron Macready in the 1968 British espionage/science fiction adventure series "The Champions". She has been cited as a sex symbol of the 1960s and 1970s. Bastedo was a vegetarian and animal welfare advocate.', 'hypothesis': 'Alexandra Bastedo was named by her mother.', 'label': 'neutral', 'reason': ''} 1 {'uid': '4874f429-da0e-406a-90c7-22240ff3ddf8', 'premise': 'Alexandra Lendon Bastedo (9 March 1946 – 12 January 2014) was a British actress, best known for her role as secret agent Sharron Macready in the 1968 British espionage/science fiction adventure series "The Champions". She has been cited as a sex symbol of the 1960s and 1970s. Bastedo was a vegetarian and animal welfare advocate.', 'hypothesis': 'Bastedo cared for all the animals that inhabit the earth.', 'label': 'neutral', 'reason': ''} ``` Here also, the dataset was generated successfuly even hough it had same keys without any warning. The reason appears to stem from here: https://github.com/huggingface/datasets/blob/56346791aed417306d054d89bd693d6b7eab17f7/src/datasets/builder.py#L988 Here, although it has access to every key, but it is not being checked and the example is written directly: https://github.com/huggingface/datasets/blob/56346791aed417306d054d89bd693d6b7eab17f7/src/datasets/builder.py#L992 I would like to take this issue if you allow me. Thank You!
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2230/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2230/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2229
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2229/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2229/comments
https://api.github.com/repos/huggingface/datasets/issues/2229/events
https://github.com/huggingface/datasets/issues/2229
859,810,602
MDU6SXNzdWU4NTk4MTA2MDI=
2,229
`xnli` dataset creating a tuple key while yielding instead of `str` or `int`
{ "login": "NikhilBartwal", "id": 42388668, "node_id": "MDQ6VXNlcjQyMzg4NjY4", "avatar_url": "https://avatars.githubusercontent.com/u/42388668?v=4", "gravatar_id": "", "url": "https://api.github.com/users/NikhilBartwal", "html_url": "https://github.com/NikhilBartwal", "followers_url": "https://api.github.com/users/NikhilBartwal/followers", "following_url": "https://api.github.com/users/NikhilBartwal/following{/other_user}", "gists_url": "https://api.github.com/users/NikhilBartwal/gists{/gist_id}", "starred_url": "https://api.github.com/users/NikhilBartwal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NikhilBartwal/subscriptions", "organizations_url": "https://api.github.com/users/NikhilBartwal/orgs", "repos_url": "https://api.github.com/users/NikhilBartwal/repos", "events_url": "https://api.github.com/users/NikhilBartwal/events{/privacy}", "received_events_url": "https://api.github.com/users/NikhilBartwal/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi ! Sure sounds good. Also if you find other datasets that use tuples instead of str/int, you can also fix them !\r\nthanks :)", "@lhoestq I have sent a PR for fixing the issue. Would be great if you could have a look! Thanks!" ]
2021-04-16T13:21:53
2021-04-19T08:56:42
2021-04-19T08:56:42
CONTRIBUTOR
null
When using `ds = datasets.load_dataset('xnli', 'ar')`, the dataset generation script uses the following section of code in the egging, which yields a tuple key instead of the specified `str` or `int` key: https://github.com/huggingface/datasets/blob/56346791aed417306d054d89bd693d6b7eab17f7/datasets/xnli/xnli.py#L196 Since, community datasets in Tensorflow Datasets also use HF datasets, this causes a Tuple key error while loading HF's `xnli` dataset. I'm up for sending a fix for this, I think we can simply use `file_idx + "_" + row_idx` as a unique key instead of a tuple.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2229/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2229/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2227
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2227/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2227/comments
https://api.github.com/repos/huggingface/datasets/issues/2227/events
https://github.com/huggingface/datasets/pull/2227
859,771,526
MDExOlB1bGxSZXF1ZXN0NjE2Nzk1NjMx
2,227
Use update_metadata_with_features decorator in class_encode_column method
{ "login": "SBrandeis", "id": 33657802, "node_id": "MDQ6VXNlcjMzNjU3ODAy", "avatar_url": "https://avatars.githubusercontent.com/u/33657802?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SBrandeis", "html_url": "https://github.com/SBrandeis", "followers_url": "https://api.github.com/users/SBrandeis/followers", "following_url": "https://api.github.com/users/SBrandeis/following{/other_user}", "gists_url": "https://api.github.com/users/SBrandeis/gists{/gist_id}", "starred_url": "https://api.github.com/users/SBrandeis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SBrandeis/subscriptions", "organizations_url": "https://api.github.com/users/SBrandeis/orgs", "repos_url": "https://api.github.com/users/SBrandeis/repos", "events_url": "https://api.github.com/users/SBrandeis/events{/privacy}", "received_events_url": "https://api.github.com/users/SBrandeis/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-16T12:31:41
2021-04-16T13:49:40
2021-04-16T13:49:39
CONTRIBUTOR
null
Following @mariosasko 's comment
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2227/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2227/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2227", "html_url": "https://github.com/huggingface/datasets/pull/2227", "diff_url": "https://github.com/huggingface/datasets/pull/2227.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2227.patch", "merged_at": "2021-04-16T13:49:39" }
true
https://api.github.com/repos/huggingface/datasets/issues/2226
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2226/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2226/comments
https://api.github.com/repos/huggingface/datasets/issues/2226/events
https://github.com/huggingface/datasets/issues/2226
859,720,302
MDU6SXNzdWU4NTk3MjAzMDI=
2,226
Batched map fails when removing all columns
{ "login": "villmow", "id": 2743060, "node_id": "MDQ6VXNlcjI3NDMwNjA=", "avatar_url": "https://avatars.githubusercontent.com/u/2743060?v=4", "gravatar_id": "", "url": "https://api.github.com/users/villmow", "html_url": "https://github.com/villmow", "followers_url": "https://api.github.com/users/villmow/followers", "following_url": "https://api.github.com/users/villmow/following{/other_user}", "gists_url": "https://api.github.com/users/villmow/gists{/gist_id}", "starred_url": "https://api.github.com/users/villmow/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/villmow/subscriptions", "organizations_url": "https://api.github.com/users/villmow/orgs", "repos_url": "https://api.github.com/users/villmow/repos", "events_url": "https://api.github.com/users/villmow/events{/privacy}", "received_events_url": "https://api.github.com/users/villmow/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
[ "I found the problem. I called `set_format` on some columns before. This makes it crash. Here is a complete example to reproduce:\r\n\r\n```python\r\nfrom datasets import load_dataset\r\nsst = load_dataset(\"sst\")\r\nsst.set_format(\"torch\", columns=[\"label\"], output_all_columns=True)\r\nds = sst[\"train\"]\r\n\r\n# crashes\r\nds.map(\r\n lambda x: {\"a\": list(range(20))},\r\n remove_columns=ds.column_names,\r\n load_from_cache_file=False,\r\n num_proc=1,\r\n batched=True,\r\n)\r\n```", "Thanks for reporting and for providing this code to reproduce the issue, this is really helpful !", "I merged a fix, it should work on `master` now :)\r\nWe'll do a new release soon !" ]
2021-04-16T11:17:01
2022-10-05T17:32:15
2022-10-05T17:32:15
NONE
null
Hi @lhoestq , I'm hijacking this issue, because I'm currently trying to do the approach you recommend: > Currently the optimal setup for single-column computations is probably to do something like > > ```python > result = dataset.map(f, input_columns="my_col", remove_columns=dataset.column_names) > ``` Here is my code: (see edit, in which I added a simplified version ``` This is the error: ```bash pyarrow.lib.ArrowInvalid: Column 1 named tokens expected length 8964 but got length 1000 ``` I wonder why this error occurs, when I delete every column? Can you give me a hint? ### Edit: I preprocessed my dataset before (using map with the features argument) and saved it to disk. May this be part of the error? I can iterate over the complete dataset and print every sample before calling map. There seems to be no other problem with the dataset. I tried to simplify the code that crashes: ```python # works log.debug(dataset.column_names) log.debug(dataset) for i, sample in enumerate(dataset): log.debug(i, sample) # crashes counted_dataset = dataset.map( lambda x: {"a": list(range(20))}, input_columns=column, remove_columns=dataset.column_names, load_from_cache_file=False, num_proc=num_workers, batched=True, ) ``` ``` pyarrow.lib.ArrowInvalid: Column 1 named tokens expected length 20 but got length 1000 ``` Edit2: May this be a problem with a schema I set when preprocessing the dataset before? I tried to add the `features` argument to the function and then I get a new error: ```python # crashes counted_dataset = dataset.map( lambda x: {"a": list(range(20))}, input_columns=column, remove_columns=dataset.column_names, load_from_cache_file=False, num_proc=num_workers, batched=True, features=datasets.Features( { "a": datasets.Sequence(datasets.Value("int32")) } ) ) ``` ``` File "env/lib/python3.8/site-packages/datasets/arrow_dataset.py", line 1704, in _map_single writer.write_batch(batch) File "env/lib/python3.8/site-packages/datasets/arrow_writer.py", line 312, in write_batch col_type = schema.field(col).type if schema is not None else None File "pyarrow/types.pxi", line 1341, in pyarrow.lib.Schema.field KeyError: 'Column tokens does not exist in schema' ``` _Originally posted by @villmow in https://github.com/huggingface/datasets/issues/2193#issuecomment-820230874_
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2226/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2226/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2225
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2225/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2225/comments
https://api.github.com/repos/huggingface/datasets/issues/2225/events
https://github.com/huggingface/datasets/pull/2225
858,469,561
MDExOlB1bGxSZXF1ZXN0NjE1NzAzMTY4
2,225
fixed one instance of 'train' to 'test'
{ "login": "alexwdong", "id": 46733535, "node_id": "MDQ6VXNlcjQ2NzMzNTM1", "avatar_url": "https://avatars.githubusercontent.com/u/46733535?v=4", "gravatar_id": "", "url": "https://api.github.com/users/alexwdong", "html_url": "https://github.com/alexwdong", "followers_url": "https://api.github.com/users/alexwdong/followers", "following_url": "https://api.github.com/users/alexwdong/following{/other_user}", "gists_url": "https://api.github.com/users/alexwdong/gists{/gist_id}", "starred_url": "https://api.github.com/users/alexwdong/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alexwdong/subscriptions", "organizations_url": "https://api.github.com/users/alexwdong/orgs", "repos_url": "https://api.github.com/users/alexwdong/repos", "events_url": "https://api.github.com/users/alexwdong/events{/privacy}", "received_events_url": "https://api.github.com/users/alexwdong/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-15T04:26:40
2021-04-15T22:09:50
2021-04-15T21:19:09
CONTRIBUTOR
null
I believe this should be 'test' instead of 'train'
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2225/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2225/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2225", "html_url": "https://github.com/huggingface/datasets/pull/2225", "diff_url": "https://github.com/huggingface/datasets/pull/2225.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2225.patch", "merged_at": "2021-04-15T21:19:09" }
true
https://api.github.com/repos/huggingface/datasets/issues/2223
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2223/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2223/comments
https://api.github.com/repos/huggingface/datasets/issues/2223/events
https://github.com/huggingface/datasets/pull/2223
857,870,800
MDExOlB1bGxSZXF1ZXN0NjE1MjE4MDIz
2,223
Set test cache config
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-14T12:55:24
2021-04-15T19:11:25
2021-04-15T19:11:25
MEMBER
null
Currently, running the tests populates the default cache directory `"~/.cache"`. This PR monkey-patches the config to set the cache directory within the temporary test directory, avoiding side effects.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2223/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2223/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2223", "html_url": "https://github.com/huggingface/datasets/pull/2223", "diff_url": "https://github.com/huggingface/datasets/pull/2223.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2223.patch", "merged_at": "2021-04-15T19:11:25" }
true
https://api.github.com/repos/huggingface/datasets/issues/2222
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2222/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2222/comments
https://api.github.com/repos/huggingface/datasets/issues/2222/events
https://github.com/huggingface/datasets/pull/2222
857,847,231
MDExOlB1bGxSZXF1ZXN0NjE1MTk5MTM5
2,222
Fix too long WindowsFileLock name
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892913, "node_id": "MDU6TGFiZWwxOTM1ODkyOTEz", "url": "https://api.github.com/repos/huggingface/datasets/labels/wontfix", "name": "wontfix", "color": "ffffff", "default": true, "description": "This will not be worked on" } ]
closed
false
null
[]
null
[]
2021-04-14T12:26:52
2021-04-14T15:00:25
2021-04-14T14:46:19
MEMBER
null
Fix WindowsFileLock name longer than allowed MAX_PATH by shortening the basename.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2222/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2222/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2222", "html_url": "https://github.com/huggingface/datasets/pull/2222", "diff_url": "https://github.com/huggingface/datasets/pull/2222.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2222.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/2221
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2221/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2221/comments
https://api.github.com/repos/huggingface/datasets/issues/2221/events
https://github.com/huggingface/datasets/pull/2221
857,833,770
MDExOlB1bGxSZXF1ZXN0NjE1MTg4MTE5
2,221
Add SLR70 - SLR80 and SLR86 to OpenSLR dataset
{ "login": "cahya-wirawan", "id": 7669893, "node_id": "MDQ6VXNlcjc2Njk4OTM=", "avatar_url": "https://avatars.githubusercontent.com/u/7669893?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cahya-wirawan", "html_url": "https://github.com/cahya-wirawan", "followers_url": "https://api.github.com/users/cahya-wirawan/followers", "following_url": "https://api.github.com/users/cahya-wirawan/following{/other_user}", "gists_url": "https://api.github.com/users/cahya-wirawan/gists{/gist_id}", "starred_url": "https://api.github.com/users/cahya-wirawan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cahya-wirawan/subscriptions", "organizations_url": "https://api.github.com/users/cahya-wirawan/orgs", "repos_url": "https://api.github.com/users/cahya-wirawan/repos", "events_url": "https://api.github.com/users/cahya-wirawan/events{/privacy}", "received_events_url": "https://api.github.com/users/cahya-wirawan/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-14T12:09:18
2021-04-14T13:50:19
2021-04-14T13:50:19
CONTRIBUTOR
null
I would like to add SLR70, SLR71, SLR72, SLR73, SLR74, SLR75, SLR76, SLR77, SLR78, SLR79, SLR80 and SLR86 to OpenSLR dataset. The languages are: Nigerian English, Chilean Spanish, Columbian Spanish, Peruvian Spanish, Puerto Rico Spanish, Venezuelan Spanish, Basque, Galician, Gujarati and Kannada.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2221/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2221/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2221", "html_url": "https://github.com/huggingface/datasets/pull/2221", "diff_url": "https://github.com/huggingface/datasets/pull/2221.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2221.patch", "merged_at": "2021-04-14T13:50:19" }
true
https://api.github.com/repos/huggingface/datasets/issues/2220
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2220/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2220/comments
https://api.github.com/repos/huggingface/datasets/issues/2220/events
https://github.com/huggingface/datasets/pull/2220
857,774,626
MDExOlB1bGxSZXF1ZXN0NjE1MTM4NDQz
2,220
Fix infinite loop in WindowsFileLock
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892913, "node_id": "MDU6TGFiZWwxOTM1ODkyOTEz", "url": "https://api.github.com/repos/huggingface/datasets/labels/wontfix", "name": "wontfix", "color": "ffffff", "default": true, "description": "This will not be worked on" } ]
closed
false
null
[]
null
[]
2021-04-14T10:49:58
2021-04-14T14:59:50
2021-04-14T14:59:34
MEMBER
null
Raise exception to avoid infinite loop.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2220/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2220/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2220", "html_url": "https://github.com/huggingface/datasets/pull/2220", "diff_url": "https://github.com/huggingface/datasets/pull/2220.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2220.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/2219
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2219/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2219/comments
https://api.github.com/repos/huggingface/datasets/issues/2219/events
https://github.com/huggingface/datasets/pull/2219
857,321,242
MDExOlB1bGxSZXF1ZXN0NjE0NzYxMzA3
2,219
Added CUAD dataset
{ "login": "bhavitvyamalik", "id": 19718818, "node_id": "MDQ6VXNlcjE5NzE4ODE4", "avatar_url": "https://avatars.githubusercontent.com/u/19718818?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bhavitvyamalik", "html_url": "https://github.com/bhavitvyamalik", "followers_url": "https://api.github.com/users/bhavitvyamalik/followers", "following_url": "https://api.github.com/users/bhavitvyamalik/following{/other_user}", "gists_url": "https://api.github.com/users/bhavitvyamalik/gists{/gist_id}", "starred_url": "https://api.github.com/users/bhavitvyamalik/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhavitvyamalik/subscriptions", "organizations_url": "https://api.github.com/users/bhavitvyamalik/orgs", "repos_url": "https://api.github.com/users/bhavitvyamalik/repos", "events_url": "https://api.github.com/users/bhavitvyamalik/events{/privacy}", "received_events_url": "https://api.github.com/users/bhavitvyamalik/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-13T21:05:03
2021-04-24T14:25:51
2021-04-16T08:50:44
CONTRIBUTOR
null
Dataset link : https://github.com/TheAtticusProject/cuad/ Working on README.md currently. Closes #2084 and [#1](https://github.com/TheAtticusProject/cuad/issues/1).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2219/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2219/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2219", "html_url": "https://github.com/huggingface/datasets/pull/2219", "diff_url": "https://github.com/huggingface/datasets/pull/2219.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2219.patch", "merged_at": "2021-04-16T08:50:44" }
true
https://api.github.com/repos/huggingface/datasets/issues/2217
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2217/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2217/comments
https://api.github.com/repos/huggingface/datasets/issues/2217/events
https://github.com/huggingface/datasets/pull/2217
857,011,314
MDExOlB1bGxSZXF1ZXN0NjE0NTAxNjIz
2,217
Revert breaking change in cache_files property
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-13T14:20:04
2021-04-14T14:24:24
2021-04-14T14:24:23
MEMBER
null
#2025 changed the format of `Dataset.cache_files`. Before it was formatted like ```python [{"filename": "path/to/file.arrow", "start": 0, "end": 1337}] ``` and it was changed to ```python ["path/to/file.arrow"] ``` since there's no start/end offsets available anymore. To make this less breaking, I'm setting the format back to a list of dicts: ```python [{"filename": "path/to/file.arrow"}] ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2217/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2217/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2217", "html_url": "https://github.com/huggingface/datasets/pull/2217", "diff_url": "https://github.com/huggingface/datasets/pull/2217.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2217.patch", "merged_at": "2021-04-14T14:24:23" }
true
https://api.github.com/repos/huggingface/datasets/issues/2216
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2216/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2216/comments
https://api.github.com/repos/huggingface/datasets/issues/2216/events
https://github.com/huggingface/datasets/pull/2216
856,955,534
MDExOlB1bGxSZXF1ZXN0NjE0NDU0MjE1
2,216
added real label for glue/mrpc to test set
{ "login": "philschmid", "id": 32632186, "node_id": "MDQ6VXNlcjMyNjMyMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/32632186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/philschmid", "html_url": "https://github.com/philschmid", "followers_url": "https://api.github.com/users/philschmid/followers", "following_url": "https://api.github.com/users/philschmid/following{/other_user}", "gists_url": "https://api.github.com/users/philschmid/gists{/gist_id}", "starred_url": "https://api.github.com/users/philschmid/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/philschmid/subscriptions", "organizations_url": "https://api.github.com/users/philschmid/orgs", "repos_url": "https://api.github.com/users/philschmid/repos", "events_url": "https://api.github.com/users/philschmid/events{/privacy}", "received_events_url": "https://api.github.com/users/philschmid/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-13T13:20:20
2021-04-13T13:53:20
2021-04-13T13:53:19
MEMBER
null
Added real label to `glue.py` `mrpc` task for test split.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2216/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2216/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2216", "html_url": "https://github.com/huggingface/datasets/pull/2216", "diff_url": "https://github.com/huggingface/datasets/pull/2216.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2216.patch", "merged_at": "2021-04-13T13:53:19" }
true
https://api.github.com/repos/huggingface/datasets/issues/2215
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2215/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2215/comments
https://api.github.com/repos/huggingface/datasets/issues/2215/events
https://github.com/huggingface/datasets/pull/2215
856,716,791
MDExOlB1bGxSZXF1ZXN0NjE0MjUyNTEy
2,215
Add datasets SLR35 and SLR36 to OpenSLR
{ "login": "cahya-wirawan", "id": 7669893, "node_id": "MDQ6VXNlcjc2Njk4OTM=", "avatar_url": "https://avatars.githubusercontent.com/u/7669893?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cahya-wirawan", "html_url": "https://github.com/cahya-wirawan", "followers_url": "https://api.github.com/users/cahya-wirawan/followers", "following_url": "https://api.github.com/users/cahya-wirawan/following{/other_user}", "gists_url": "https://api.github.com/users/cahya-wirawan/gists{/gist_id}", "starred_url": "https://api.github.com/users/cahya-wirawan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cahya-wirawan/subscriptions", "organizations_url": "https://api.github.com/users/cahya-wirawan/orgs", "repos_url": "https://api.github.com/users/cahya-wirawan/repos", "events_url": "https://api.github.com/users/cahya-wirawan/events{/privacy}", "received_events_url": "https://api.github.com/users/cahya-wirawan/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-13T08:24:07
2021-04-13T14:05:14
2021-04-13T14:05:14
CONTRIBUTOR
null
I would like to add [SLR35](https://openslr.org/35/) (18GB) and [SLR36](https://openslr.org/36/) (22GB) which are Large Javanese and Sundanese ASR training data set collected by Google in collaboration with Reykjavik University and Universitas Gadjah Mada in Indonesia.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2215/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2215/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2215", "html_url": "https://github.com/huggingface/datasets/pull/2215", "diff_url": "https://github.com/huggingface/datasets/pull/2215.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2215.patch", "merged_at": "2021-04-13T14:05:14" }
true
https://api.github.com/repos/huggingface/datasets/issues/2214
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2214/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2214/comments
https://api.github.com/repos/huggingface/datasets/issues/2214/events
https://github.com/huggingface/datasets/issues/2214
856,333,657
MDU6SXNzdWU4NTYzMzM2NTc=
2,214
load_metric error: module 'datasets.utils.file_utils' has no attribute 'add_start_docstrings'
{ "login": "nsaphra", "id": 414788, "node_id": "MDQ6VXNlcjQxNDc4OA==", "avatar_url": "https://avatars.githubusercontent.com/u/414788?v=4", "gravatar_id": "", "url": "https://api.github.com/users/nsaphra", "html_url": "https://github.com/nsaphra", "followers_url": "https://api.github.com/users/nsaphra/followers", "following_url": "https://api.github.com/users/nsaphra/following{/other_user}", "gists_url": "https://api.github.com/users/nsaphra/gists{/gist_id}", "starred_url": "https://api.github.com/users/nsaphra/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nsaphra/subscriptions", "organizations_url": "https://api.github.com/users/nsaphra/orgs", "repos_url": "https://api.github.com/users/nsaphra/repos", "events_url": "https://api.github.com/users/nsaphra/events{/privacy}", "received_events_url": "https://api.github.com/users/nsaphra/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi @nsaphra, thanks for reporting.\r\n\r\nThis issue was fixed in `datasets` version 1.3.0. Could you please update `datasets` and tell me if the problem persists?\r\n```shell\r\npip install -U datasets\r\n```", "There might be a bug in the conda version of `datasets` 1.2.1 where the datasets/metric scripts are downloaded from `master` instead of the `1.2.1` repo.\r\n\r\nYou can try setting the env var `HF_SCRIPTS_VERSION=\"1.2.1\"` as a workaround. Let me know if that helps.", "I just faced the same issue. I was using 1.2.1 from conda and received the same AttributeError complaining about 'add_start_docstrings'. Uninstalling the conda installed datasets and then installing the latest datasets (version 1.5.0) using pip install solved the issue for me. I don't like mixing up conda and pip installs in the same environments but this will have to do for now, until 1.5.0 is made available through conda.", "Yep, seems to have fixed things! The conda package could really do with an update. Thanks!" ]
2021-04-12T20:26:01
2021-04-23T15:20:02
2021-04-23T15:20:02
NONE
null
I'm having the same problem as [Notebooks issue 10](https://github.com/huggingface/notebooks/issues/10) on datasets 1.2.1, and it seems to be an issue with the datasets package. ```python >>> from datasets import load_metric >>> metric = load_metric("glue", "sst2") Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/ext3/miniconda3/lib/python3.8/site-packages/datasets-1.2.1-py3.8.egg/datasets/load.py", line 502, in load_metric File "/ext3/miniconda3/lib/python3.8/site-packages/datasets-1.2.1-py3.8.egg/datasets/load.py", line 66, in import_main_class File "/ext3/miniconda3/lib/python3.8/importlib/__init__.py", line 127, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "<frozen importlib._bootstrap>", line 1014, in _gcd_import File "<frozen importlib._bootstrap>", line 991, in _find_and_load File "<frozen importlib._bootstrap>", line 975, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 671, in _load_unlocked File "<frozen importlib._bootstrap_external>", line 783, in exec_module File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed File "/home/ns4008/.cache/huggingface/modules/datasets_modules/metrics/glue/e4606ab9804a36bcd5a9cebb2cb65bb14b6ac78ee9e6d5981fa679a495dd55de/glue.py", line 105, in <module> @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) AttributeError: module 'datasets.utils.file_utils' has no attribute 'add_start_docstrings' ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2214/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2214/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2213
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2213/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2213/comments
https://api.github.com/repos/huggingface/datasets/issues/2213/events
https://github.com/huggingface/datasets/pull/2213
856,025,320
MDExOlB1bGxSZXF1ZXN0NjEzNjcwODk2
2,213
Fix lc_quad download checksum
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-12T14:16:59
2021-04-14T22:04:54
2021-04-14T13:42:25
CONTRIBUTOR
null
Fixes #2211
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2213/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2213/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2213", "html_url": "https://github.com/huggingface/datasets/pull/2213", "diff_url": "https://github.com/huggingface/datasets/pull/2213.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2213.patch", "merged_at": "2021-04-14T13:42:25" }
true
https://api.github.com/repos/huggingface/datasets/issues/2211
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2211/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2211/comments
https://api.github.com/repos/huggingface/datasets/issues/2211/events
https://github.com/huggingface/datasets/issues/2211
855,988,410
MDU6SXNzdWU4NTU5ODg0MTA=
2,211
Getting checksum error when trying to load lc_quad dataset
{ "login": "hanss0n", "id": 21348833, "node_id": "MDQ6VXNlcjIxMzQ4ODMz", "avatar_url": "https://avatars.githubusercontent.com/u/21348833?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hanss0n", "html_url": "https://github.com/hanss0n", "followers_url": "https://api.github.com/users/hanss0n/followers", "following_url": "https://api.github.com/users/hanss0n/following{/other_user}", "gists_url": "https://api.github.com/users/hanss0n/gists{/gist_id}", "starred_url": "https://api.github.com/users/hanss0n/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hanss0n/subscriptions", "organizations_url": "https://api.github.com/users/hanss0n/orgs", "repos_url": "https://api.github.com/users/hanss0n/repos", "events_url": "https://api.github.com/users/hanss0n/events{/privacy}", "received_events_url": "https://api.github.com/users/hanss0n/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi,\r\n\r\nI've already opened a PR with the fix. If you are in a hurry, just build the project from source and run:\r\n```bash\r\ndatasets-cli test datasets/lc_quad --save_infos --all_configs --ignore_verifications\r\n```\r\n\r\n", "Ah sorry, I tried searching but couldn't find any related PR. \r\n\r\nThank you! " ]
2021-04-12T13:38:58
2021-04-14T13:42:25
2021-04-14T13:42:25
NONE
null
I'm having issues loading the [lc_quad](https://huggingface.co/datasets/fquad) dataset by running: ```Python lc_quad = load_dataset("lc_quad") ``` which is giving me the following error: ``` Using custom data configuration default Downloading and preparing dataset lc_quad/default (download: 3.69 MiB, generated: 19.77 MiB, post-processed: Unknown size, total: 23.46 MiB) to /root/.cache/huggingface/datasets/lc_quad/default/2.0.0/5a98fe174603f5dec6df07edf1c2b4d2317210d2ad61f5a393839bca4d64e5a7... --------------------------------------------------------------------------- NonMatchingChecksumError Traceback (most recent call last) <ipython-input-42-404ace83f73c> in <module>() ----> 1 lc_quad = load_dataset("lc_quad") 3 frames /usr/local/lib/python3.7/dist-packages/datasets/utils/info_utils.py in verify_checksums(expected_checksums, recorded_checksums, verification_name) 37 if len(bad_urls) > 0: 38 error_msg = "Checksums didn't match" + for_verification_name + ":\n" ---> 39 raise NonMatchingChecksumError(error_msg + str(bad_urls)) 40 logger.info("All the checksums matched successfully" + for_verification_name) 41 NonMatchingChecksumError: Checksums didn't match for dataset source files: ['https://github.com/AskNowQA/LC-QuAD2.0/archive/master.zip'] ``` Does anyone know why this could be and how I fix it?
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2211/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2211/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2210
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2210/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2210/comments
https://api.github.com/repos/huggingface/datasets/issues/2210/events
https://github.com/huggingface/datasets/issues/2210
855,709,400
MDU6SXNzdWU4NTU3MDk0MDA=
2,210
dataloading slow when using HUGE dataset
{ "login": "hwijeen", "id": 29157715, "node_id": "MDQ6VXNlcjI5MTU3NzE1", "avatar_url": "https://avatars.githubusercontent.com/u/29157715?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hwijeen", "html_url": "https://github.com/hwijeen", "followers_url": "https://api.github.com/users/hwijeen/followers", "following_url": "https://api.github.com/users/hwijeen/following{/other_user}", "gists_url": "https://api.github.com/users/hwijeen/gists{/gist_id}", "starred_url": "https://api.github.com/users/hwijeen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hwijeen/subscriptions", "organizations_url": "https://api.github.com/users/hwijeen/orgs", "repos_url": "https://api.github.com/users/hwijeen/repos", "events_url": "https://api.github.com/users/hwijeen/events{/privacy}", "received_events_url": "https://api.github.com/users/hwijeen/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi ! Yes this is an issue with `datasets<=1.5.0`\r\nThis issue has been fixed by #2122 , we'll do a new release soon :)\r\nFor now you can test it on the `master` branch.", "Hi, thank you for your answer. I did not realize that my issue stems from the same problem. " ]
2021-04-12T08:33:02
2021-04-13T02:03:05
2021-04-13T02:03:05
NONE
null
Hi, When I use datasets with 600GB data, the dataloading speed increases significantly. I am experimenting with two datasets, and one is about 60GB and the other 600GB. Simply speaking, my code uses `datasets.set_format("torch")` function and let pytorch-lightning handle ddp training. When looking at the pytorch-lightning supported profile of two different runs, I see that fetching a batch(`get_train_batch`) consumes an unreasonable amount of time when data is large. What could be the cause? * 60GB data ``` Action | Mean duration (s) |Num calls | Total time (s) | Percentage % | ------------------------------------------------------------------------------------------------------------------------------------ Total | - |_ | 200.33 | 100 % | ------------------------------------------------------------------------------------------------------------------------------------ run_training_epoch | 71.994 |1 | 71.994 | 35.937 | run_training_batch | 0.64373 |100 | 64.373 | 32.133 | optimizer_step_and_closure_0 | 0.64322 |100 | 64.322 | 32.108 | training_step_and_backward | 0.61004 |100 | 61.004 | 30.452 | model_backward | 0.37552 |100 | 37.552 | 18.745 | model_forward | 0.22813 |100 | 22.813 | 11.387 | training_step | 0.22759 |100 | 22.759 | 11.361 | get_train_batch | 0.066385 |100 | 6.6385 | 3.3138 | ``` * 600GB data ``` Action | Mean duration (s) |Num calls | Total time (s) | Percentage % | ------------------------------------------------------------------------------------------------------------------------------------ Total | - |_ | 3285.6 | 100 % | ------------------------------------------------------------------------------------------------------------------------------------ run_training_epoch | 1397.9 |1 | 1397.9 | 42.546 | run_training_batch | 7.2596 |100 | 725.96 | 22.095 | optimizer_step_and_closure_0 | 7.2589 |100 | 725.89 | 22.093 | training_step_and_backward | 7.223 |100 | 722.3 | 21.984 | model_backward | 6.9662 |100 | 696.62 | 21.202 | get_train_batch | 6.322 |100 | 632.2 | 19.241 | model_forward | 0.24902 |100 | 24.902 | 0.75789 | training_step | 0.2485 |100 | 24.85 | 0.75633 | ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2210/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2210/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2209
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2209/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2209/comments
https://api.github.com/repos/huggingface/datasets/issues/2209/events
https://github.com/huggingface/datasets/pull/2209
855,638,232
MDExOlB1bGxSZXF1ZXN0NjEzMzQwMTI2
2,209
Add code of conduct to the project
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892861, "node_id": "MDU6TGFiZWwxOTM1ODkyODYx", "url": "https://api.github.com/repos/huggingface/datasets/labels/documentation", "name": "documentation", "color": "0075ca", "default": true, "description": "Improvements or additions to documentation" } ]
closed
false
null
[]
null
[]
2021-04-12T07:16:14
2021-04-12T17:55:52
2021-04-12T17:55:52
MEMBER
null
Add code of conduct to the project and link it from README and CONTRIBUTING. This was already done in `transformers`.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2209/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2209/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2209", "html_url": "https://github.com/huggingface/datasets/pull/2209", "diff_url": "https://github.com/huggingface/datasets/pull/2209.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2209.patch", "merged_at": "2021-04-12T17:55:52" }
true
https://api.github.com/repos/huggingface/datasets/issues/2208
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2208/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2208/comments
https://api.github.com/repos/huggingface/datasets/issues/2208/events
https://github.com/huggingface/datasets/pull/2208
855,343,835
MDExOlB1bGxSZXF1ZXN0NjEzMTAxMzMw
2,208
Remove Python2 leftovers
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-11T16:08:03
2021-04-14T22:05:36
2021-04-14T13:40:51
CONTRIBUTOR
null
This PR removes Python2 leftovers since this project aims for Python3.6+ (and as of 2020 Python2 is no longer officially supported)
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2208/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2208/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2208", "html_url": "https://github.com/huggingface/datasets/pull/2208", "diff_url": "https://github.com/huggingface/datasets/pull/2208.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2208.patch", "merged_at": "2021-04-14T13:40:50" }
true
https://api.github.com/repos/huggingface/datasets/issues/2207
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2207/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2207/comments
https://api.github.com/repos/huggingface/datasets/issues/2207/events
https://github.com/huggingface/datasets/issues/2207
855,267,383
MDU6SXNzdWU4NTUyNjczODM=
2,207
making labels consistent across the datasets
{ "login": "dorost1234", "id": 79165106, "node_id": "MDQ6VXNlcjc5MTY1MTA2", "avatar_url": "https://avatars.githubusercontent.com/u/79165106?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dorost1234", "html_url": "https://github.com/dorost1234", "followers_url": "https://api.github.com/users/dorost1234/followers", "following_url": "https://api.github.com/users/dorost1234/following{/other_user}", "gists_url": "https://api.github.com/users/dorost1234/gists{/gist_id}", "starred_url": "https://api.github.com/users/dorost1234/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dorost1234/subscriptions", "organizations_url": "https://api.github.com/users/dorost1234/orgs", "repos_url": "https://api.github.com/users/dorost1234/repos", "events_url": "https://api.github.com/users/dorost1234/events{/privacy}", "received_events_url": "https://api.github.com/users/dorost1234/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi ! The ClassLabel feature type encodes the labels as integers.\r\nThe integer corresponds to the index of the label name in the `names` list of the ClassLabel.\r\nHere that means that the labels are 'entailment' (0), 'neutral' (1), 'contradiction' (2).\r\n\r\nYou can get the label names back by using `a.features['label'].int2str(i)`.\r\n", "Hi! You can also easily reorder the label with the [`Dataset.align_labels_with_mapping`](https://huggingface.co/docs/datasets/master/en/process#align) method." ]
2021-04-11T10:03:56
2022-06-01T16:23:08
2022-06-01T16:21:10
NONE
null
Hi For accessing the labels one can type ``` >>> a.features['label'] ClassLabel(num_classes=3, names=['entailment', 'neutral', 'contradiction'], names_file=None, id=None) ``` The labels however are not consistent with the actual labels sometimes, for instance in case of XNLI, the actual labels are 0,1,2, but if one try to access as above they are entailment, neutral,contradiction, it would be great to have the labels consistent. thanks
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2207/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2207/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2206
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2206/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2206/comments
https://api.github.com/repos/huggingface/datasets/issues/2206/events
https://github.com/huggingface/datasets/issues/2206
855,252,415
MDU6SXNzdWU4NTUyNTI0MTU=
2,206
Got pyarrow error when loading a dataset while adding special tokens into the tokenizer
{ "login": "yana-xuyan", "id": 38536635, "node_id": "MDQ6VXNlcjM4NTM2NjM1", "avatar_url": "https://avatars.githubusercontent.com/u/38536635?v=4", "gravatar_id": "", "url": "https://api.github.com/users/yana-xuyan", "html_url": "https://github.com/yana-xuyan", "followers_url": "https://api.github.com/users/yana-xuyan/followers", "following_url": "https://api.github.com/users/yana-xuyan/following{/other_user}", "gists_url": "https://api.github.com/users/yana-xuyan/gists{/gist_id}", "starred_url": "https://api.github.com/users/yana-xuyan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yana-xuyan/subscriptions", "organizations_url": "https://api.github.com/users/yana-xuyan/orgs", "repos_url": "https://api.github.com/users/yana-xuyan/repos", "events_url": "https://api.github.com/users/yana-xuyan/events{/privacy}", "received_events_url": "https://api.github.com/users/yana-xuyan/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi,\r\n\r\nthe output of the tokenizers is treated specially in the lib to optimize the dataset size (see the code [here](https://github.com/huggingface/datasets/blob/master/src/datasets/arrow_writer.py#L138-L141)). It looks like that one of the values in a dictionary returned by the tokenizer is out of the assumed range.\r\nCan you please provide a minimal reproducible example for more help?", "Hi @yana-xuyan, thanks for reporting.\r\n\r\nAs clearly @mariosasko explained, `datasets` performs some optimizations in order to reduce the size of the dataset cache files. And one of them is storing the field `special_tokens_mask` as `int8`, which means that this field can only contain integers between `-128` to `127`. As your message error states, one of the values of this field is `50259`, and therefore it cannot be stored as an `int8`.\r\n\r\nMaybe we could implement a way to disable this optimization and allow using any integer value; although the size of the cache files would be much larger.", "I'm facing same issue @mariosasko @albertvillanova \r\n\r\n```\r\nArrowInvalid: Integer value 50260 not in range: -128 to 127\r\n```\r\n\r\nTo reproduce:\r\n```python\r\nSPECIAL_TOKENS = ['<bos>','<eos>','<speaker1>','<speaker2>','<pad>']\r\nATTR_TO_SPECIAL_TOKEN = {\r\n 'bos_token': '<bos>', \r\n 'eos_token': '<eos>', \r\n 'pad_token': '<pad>',\r\n 'additional_special_tokens': ['<speaker1>', '<speaker2>']\r\n }\r\n\r\ntokenizer = AutoTokenizer.from_pretrained(\"gpt2\", use_fast=False)\r\nnum_added_tokens =tokenizer.add_special_tokens(ATTR_TO_SPECIAL_TOKEN)\r\nvocab_size = len(self.tokenizer.encoder) + num_added_tokens\r\nvocab =tokenizer.get_vocab()\r\n\r\npad_index = tokenizer.pad_token_id\r\neos_index = tokenizer.eos_token_id\r\nbos_index = tokenizer.bos_token_id\r\nspeaker1_index = vocab[\"<speaker1>\"]\r\nspeaker2_index = vocab[\"<speaker2>\"]\r\n```\r\n\r\n```python\r\ntokenizer.decode(['50260'])\r\n'<speaker1>'\r\n```", "@mariosasko \r\nI am hitting this bug in the Bert tokenizer too. I see that @albertvillanova labeled this as a bug back in April. Has there been a fix released yet?\r\nWhat I did for now is to just disable the optimization in the HF library. @yana-xuyan and @thomas-happify, is that what you did and did that work for you?\r\n\r\n", "Hi @gregg-ADP, \r\n\r\nThis is still a bug.\r\n\r\nAs @albertvillanova has suggested, maybe it's indeed worth adding a variable to `config.py` to have a way to disable this behavior.\r\n\r\nIn the meantime, this forced optimization can be disabled by specifying `features` (of the returned examples) in the `map` call:\r\n```python\r\nfrom datasets import *\r\n... # dataset init\r\nds.map(process_example, features=Features({\"special_tokens_mask\": Sequence(Value(\"int32\")), ... rest of the features}) \r\n```\r\n\r\ncc @lhoestq so he is also aware of this issue", "Thanks for the quick reply @mariosasko. What I did was to changed the optimizer to use int32 instead of int8. \r\nWhat you're suggesting specifies the type for each feature explicitly without changing the HF code. This is definitely a better option. However, we are hitting a new error later:\r\n```\r\n File \"/Users/ccccc/PycharmProjects/aaaa-ml/venv-source/lib/python3.8/site-packages/torch/nn/modules/module.py\", line 1051, in _call_impl\r\n return forward_call(*input, **kwargs)\r\nTypeError: forward() got an unexpected keyword argument 'pos'\r\n\r\n```\r\nWhere 'pos' is the name of a new feature we added. Do you agree that your way of fixing the optimizer issue will not fix our new issue? If not, I will continue with this optimizer fix until we resolve our other issue.\r\n", "Hi @gwc4github,\r\n\r\nthe fix was merged a few minutes ago, and it doesn't require any changes on the user side (e.g. no need for specifying `features`). If you find time, feel free to install `datasets` from master with:\r\n```\r\npip install git+https://github.com/huggingface/datasets.git\r\n```\r\nand let us know if it works for your use case! " ]
2021-04-11T08:40:09
2021-11-10T12:18:30
2021-11-10T12:04:28
NONE
null
I added five more special tokens into the GPT2 tokenizer. But after that, when I try to pre-process the data using my previous code, I got an error shown below: Traceback (most recent call last): File "/home/xuyan/anaconda3/envs/convqa/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 1687, in _map_single writer.write(example) File "/home/xuyan/anaconda3/envs/convqa/lib/python3.7/site-packages/datasets/arrow_writer.py", line 296, in write self.write_on_file() File "/home/xuyan/anaconda3/envs/convqa/lib/python3.7/site-packages/datasets/arrow_writer.py", line 270, in write_on_file pa_array = pa.array(typed_sequence) File "pyarrow/array.pxi", line 222, in pyarrow.lib.array File "pyarrow/array.pxi", line 110, in pyarrow.lib._handle_arrow_array_protocol File "/home/xuyan/anaconda3/envs/convqa/lib/python3.7/site-packages/datasets/arrow_writer.py", line 108, in __arrow_array__ out = out.cast(pa.list_(self.optimized_int_type)) File "pyarrow/array.pxi", line 810, in pyarrow.lib.Array.cast File "/home/xuyan/anaconda3/envs/convqa/lib/python3.7/site-packages/pyarrow/compute.py", line 281, in cast return call_function("cast", [arr], options) File "pyarrow/_compute.pyx", line 465, in pyarrow._compute.call_function File "pyarrow/_compute.pyx", line 294, in pyarrow._compute.Function.call File "pyarrow/error.pxi", line 122, in pyarrow.lib.pyarrow_internal_check_status File "pyarrow/error.pxi", line 84, in pyarrow.lib.check_status pyarrow.lib.ArrowInvalid: Integer value 50259 not in range: -128 to 127 Do you have any idea about it?
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2206/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2206/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2205
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2205/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2205/comments
https://api.github.com/repos/huggingface/datasets/issues/2205/events
https://github.com/huggingface/datasets/pull/2205
855,207,605
MDExOlB1bGxSZXF1ZXN0NjEzMDAwMzYw
2,205
Updating citation information on LinCE readme
{ "login": "gaguilar", "id": 5833357, "node_id": "MDQ6VXNlcjU4MzMzNTc=", "avatar_url": "https://avatars.githubusercontent.com/u/5833357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gaguilar", "html_url": "https://github.com/gaguilar", "followers_url": "https://api.github.com/users/gaguilar/followers", "following_url": "https://api.github.com/users/gaguilar/following{/other_user}", "gists_url": "https://api.github.com/users/gaguilar/gists{/gist_id}", "starred_url": "https://api.github.com/users/gaguilar/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gaguilar/subscriptions", "organizations_url": "https://api.github.com/users/gaguilar/orgs", "repos_url": "https://api.github.com/users/gaguilar/repos", "events_url": "https://api.github.com/users/gaguilar/events{/privacy}", "received_events_url": "https://api.github.com/users/gaguilar/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-11T03:18:05
2021-04-12T17:53:34
2021-04-12T17:53:34
CONTRIBUTOR
null
Hi! I just updated the citation information in this PR. It had an additional bibtex from one of the datasets used in LinCE and then the LinCE bibtex. I removed the former and added a link that shows the full list of citations for each dataset. Thanks!
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2205/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2205/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2205", "html_url": "https://github.com/huggingface/datasets/pull/2205", "diff_url": "https://github.com/huggingface/datasets/pull/2205.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2205.patch", "merged_at": "2021-04-12T17:53:34" }
true
https://api.github.com/repos/huggingface/datasets/issues/2204
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2204/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2204/comments
https://api.github.com/repos/huggingface/datasets/issues/2204/events
https://github.com/huggingface/datasets/pull/2204
855,144,431
MDExOlB1bGxSZXF1ZXN0NjEyOTU1MzM2
2,204
Add configurable options to `seqeval` metric
{ "login": "marrodion", "id": 44571847, "node_id": "MDQ6VXNlcjQ0NTcxODQ3", "avatar_url": "https://avatars.githubusercontent.com/u/44571847?v=4", "gravatar_id": "", "url": "https://api.github.com/users/marrodion", "html_url": "https://github.com/marrodion", "followers_url": "https://api.github.com/users/marrodion/followers", "following_url": "https://api.github.com/users/marrodion/following{/other_user}", "gists_url": "https://api.github.com/users/marrodion/gists{/gist_id}", "starred_url": "https://api.github.com/users/marrodion/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/marrodion/subscriptions", "organizations_url": "https://api.github.com/users/marrodion/orgs", "repos_url": "https://api.github.com/users/marrodion/repos", "events_url": "https://api.github.com/users/marrodion/events{/privacy}", "received_events_url": "https://api.github.com/users/marrodion/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-10T19:58:19
2021-04-15T13:49:46
2021-04-15T13:49:46
CONTRIBUTOR
null
Fixes #2148 Adds options to use strict mode, different schemes of evaluation, sample weight and adjust zero_division behavior, if encountered. `seqeval` provides schemes as objects, hence dynamic import from string, to avoid making the user do the import (thanks to @albertvillanova for the `importlib` idea).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2204/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2204/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2204", "html_url": "https://github.com/huggingface/datasets/pull/2204", "diff_url": "https://github.com/huggingface/datasets/pull/2204.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2204.patch", "merged_at": "2021-04-15T13:49:46" }
true
https://api.github.com/repos/huggingface/datasets/issues/2203
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2203/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2203/comments
https://api.github.com/repos/huggingface/datasets/issues/2203/events
https://github.com/huggingface/datasets/pull/2203
855,053,595
MDExOlB1bGxSZXF1ZXN0NjEyODg4MzA5
2,203
updated banking77 train and test data
{ "login": "hsali", "id": 6765330, "node_id": "MDQ6VXNlcjY3NjUzMzA=", "avatar_url": "https://avatars.githubusercontent.com/u/6765330?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hsali", "html_url": "https://github.com/hsali", "followers_url": "https://api.github.com/users/hsali/followers", "following_url": "https://api.github.com/users/hsali/following{/other_user}", "gists_url": "https://api.github.com/users/hsali/gists{/gist_id}", "starred_url": "https://api.github.com/users/hsali/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hsali/subscriptions", "organizations_url": "https://api.github.com/users/hsali/orgs", "repos_url": "https://api.github.com/users/hsali/repos", "events_url": "https://api.github.com/users/hsali/events{/privacy}", "received_events_url": "https://api.github.com/users/hsali/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-10T12:10:10
2021-04-23T14:33:39
2021-04-23T14:33:39
NONE
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2203/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2203/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2203", "html_url": "https://github.com/huggingface/datasets/pull/2203", "diff_url": "https://github.com/huggingface/datasets/pull/2203.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2203.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/2202
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2202/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2202/comments
https://api.github.com/repos/huggingface/datasets/issues/2202/events
https://github.com/huggingface/datasets/pull/2202
854,501,109
MDExOlB1bGxSZXF1ZXN0NjEyNDM2ODMx
2,202
Add classes GenerateMode, DownloadConfig and Version to the documentation
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-09T12:58:19
2021-04-12T17:58:00
2021-04-12T17:57:59
MEMBER
null
Add documentation for classes `GenerateMode`, `DownloadConfig` and `Version`. Update the docstring of `load_dataset` to create cross-reference links to the classes. Related to #2187.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2202/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2202/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2202", "html_url": "https://github.com/huggingface/datasets/pull/2202", "diff_url": "https://github.com/huggingface/datasets/pull/2202.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2202.patch", "merged_at": "2021-04-12T17:57:59" }
true
https://api.github.com/repos/huggingface/datasets/issues/2201
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2201/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2201/comments
https://api.github.com/repos/huggingface/datasets/issues/2201/events
https://github.com/huggingface/datasets/pull/2201
854,499,563
MDExOlB1bGxSZXF1ZXN0NjEyNDM1NTE3
2,201
Fix ArrowWriter overwriting features in ArrowBasedBuilder
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-09T12:56:19
2021-04-12T13:32:17
2021-04-12T13:32:16
MEMBER
null
This should fix the issues with CSV loading experienced in #2153 and #2200. The CSV builder is an ArrowBasedBuilder that had an issue with its ArrowWriter used to write the arrow file from the csv data. The writer wasn't initialized with the features passed by the user. Therefore the writer was inferring the features from the arrow data, discarding the features passed by the user. I fixed that and I updated the tests
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2201/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2201/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2201", "html_url": "https://github.com/huggingface/datasets/pull/2201", "diff_url": "https://github.com/huggingface/datasets/pull/2201.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2201.patch", "merged_at": "2021-04-12T13:32:16" }
true
https://api.github.com/repos/huggingface/datasets/issues/2200
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2200/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2200/comments
https://api.github.com/repos/huggingface/datasets/issues/2200/events
https://github.com/huggingface/datasets/issues/2200
854,449,656
MDU6SXNzdWU4NTQ0NDk2NTY=
2,200
_prepare_split will overwrite DatasetBuilder.info.features
{ "login": "Gforky", "id": 4157614, "node_id": "MDQ6VXNlcjQxNTc2MTQ=", "avatar_url": "https://avatars.githubusercontent.com/u/4157614?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gforky", "html_url": "https://github.com/Gforky", "followers_url": "https://api.github.com/users/Gforky/followers", "following_url": "https://api.github.com/users/Gforky/following{/other_user}", "gists_url": "https://api.github.com/users/Gforky/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gforky/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gforky/subscriptions", "organizations_url": "https://api.github.com/users/Gforky/orgs", "repos_url": "https://api.github.com/users/Gforky/repos", "events_url": "https://api.github.com/users/Gforky/events{/privacy}", "received_events_url": "https://api.github.com/users/Gforky/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi ! This might be related to #2153 \r\n\r\nYou're right the ArrowWriter should be initialized with `features=self.info.features` ! Good catch\r\nI'm opening a PR to fix this and also to figure out how it was not caught in the tests\r\n\r\nEDIT: opened #2201", "> Hi ! This might be related to #2153\r\n> \r\n> You're right the ArrowWriter should be initialized with `features=self.info.features` ! Good catch\r\n> I'm opening a PR to fix this and also to figure out how it was not caught in the tests\r\n> \r\n> EDIT: opened #2201\r\n\r\nGlad to hear that! Thank you for your fix, I'm new to huggingface, it's a fantastic project 😁" ]
2021-04-09T11:47:13
2021-06-04T10:37:35
2021-06-04T10:37:35
NONE
null
Hi, here is my issue: I initialized a Csv datasetbuilder with specific features: ``` def get_dataset_features(data_args): features = {} if data_args.text_features: features.update({text_feature: hf_features.Value("string") for text_feature in data_args.text_features.strip().split(",")}) if data_args.num_features: features.update({text_feature: hf_features.Value("float32") for text_feature in data_args.num_features.strip().split(",")}) if data_args.label_classes: features["label"] = hf_features.ClassLabel(names=data_args.label_classes.strip().split(",")) else: features["label"] = hf_features.Value("float32") return hf_features.Features(features) datasets = load_dataset(extension, data_files=data_files, sep=data_args.delimiter, header=data_args.header, column_names=data_args.column_names.split(",") if data_args.column_names else None, features=get_dataset_features(data_args=data_args)) ``` The `features` is printout as below before `builder_instance.as_dataset` is called: ``` {'label': ClassLabel(num_classes=2, names=['unacceptable', 'acceptable'], names_file=None, id=None), 'notated': Value(dtype='string', id=None), 'sentence': Value(dtype='string', id=None), 'src_code': Value(dtype='string', id=None)} ```` But after the `builder_instance.as_dataset` is called for Csv dataset builder, the `features` is changed to: ``` {'label': Value(dtype='int64', id=None), 'notated': Value(dtype='string', id=None), 'sentence': Value(dtype='string', id=None), 'src_code': Value(dtype='string', id=None)} ``` After digged into the code, I releazed that in `ArrowBasedBuilder._prepare_split`, the DatasetBuilder's info's features will be overwrited by `ArrowWriter`'s `_features`. But `ArrowWriter` is initailized without passing `features`. So my concern is: It's this overwrite must be done, or, should it be an option to pass features in `_prepare_split` function?
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2200/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2200/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2199
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2199/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2199/comments
https://api.github.com/repos/huggingface/datasets/issues/2199/events
https://github.com/huggingface/datasets/pull/2199
854,417,318
MDExOlB1bGxSZXF1ZXN0NjEyMzY0ODU3
2,199
Fix backward compatibility in Dataset.load_from_disk
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-09T11:01:10
2021-04-09T15:57:05
2021-04-09T15:57:05
MEMBER
null
Fix backward compatibility when loading from disk an old dataset saved to disk with indices using key "_indices_data_files". Related to #2195.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2199/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2199/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2199", "html_url": "https://github.com/huggingface/datasets/pull/2199", "diff_url": "https://github.com/huggingface/datasets/pull/2199.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2199.patch", "merged_at": "2021-04-09T15:57:05" }
true
https://api.github.com/repos/huggingface/datasets/issues/2198
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2198/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2198/comments
https://api.github.com/repos/huggingface/datasets/issues/2198/events
https://github.com/huggingface/datasets/pull/2198
854,357,481
MDExOlB1bGxSZXF1ZXN0NjEyMzE0MTIz
2,198
added file_permission in load_dataset
{ "login": "bhavitvyamalik", "id": 19718818, "node_id": "MDQ6VXNlcjE5NzE4ODE4", "avatar_url": "https://avatars.githubusercontent.com/u/19718818?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bhavitvyamalik", "html_url": "https://github.com/bhavitvyamalik", "followers_url": "https://api.github.com/users/bhavitvyamalik/followers", "following_url": "https://api.github.com/users/bhavitvyamalik/following{/other_user}", "gists_url": "https://api.github.com/users/bhavitvyamalik/gists{/gist_id}", "starred_url": "https://api.github.com/users/bhavitvyamalik/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhavitvyamalik/subscriptions", "organizations_url": "https://api.github.com/users/bhavitvyamalik/orgs", "repos_url": "https://api.github.com/users/bhavitvyamalik/repos", "events_url": "https://api.github.com/users/bhavitvyamalik/events{/privacy}", "received_events_url": "https://api.github.com/users/bhavitvyamalik/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-09T09:39:06
2021-04-16T14:11:46
2021-04-16T14:11:46
CONTRIBUTOR
null
As discussed in #2065 I've added `file_permission` argument in `load_dataset`. Added mainly 2 things here: 1) Permission of downloaded datasets when converted to .arrow files can be changed with argument `file_permission` argument in `load_dataset` (default is 0o644 only) 2) Incase the user uses `map` later on to generate another cache file of dataset, it ensures the permissions of newly generated file are similar to that of` *-train.arrow` file inside cache_dir for that dataset.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2198/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2198/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2198", "html_url": "https://github.com/huggingface/datasets/pull/2198", "diff_url": "https://github.com/huggingface/datasets/pull/2198.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2198.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/2197
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2197/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2197/comments
https://api.github.com/repos/huggingface/datasets/issues/2197/events
https://github.com/huggingface/datasets/pull/2197
854,356,559
MDExOlB1bGxSZXF1ZXN0NjEyMzEzMzQw
2,197
fix missing indices_files in load_form_disk
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-09T09:37:57
2021-04-09T09:54:40
2021-04-09T09:54:39
MEMBER
null
This should fix #2195 `load_from_disk` was failing if there was no "_indices_files" field in state.json. This can happen if the dataset has no indices mapping
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2197/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2197/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2197", "html_url": "https://github.com/huggingface/datasets/pull/2197", "diff_url": "https://github.com/huggingface/datasets/pull/2197.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2197.patch", "merged_at": "2021-04-09T09:54:39" }
true
https://api.github.com/repos/huggingface/datasets/issues/2196
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2196/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2196/comments
https://api.github.com/repos/huggingface/datasets/issues/2196/events
https://github.com/huggingface/datasets/issues/2196
854,126,114
MDU6SXNzdWU4NTQxMjYxMTQ=
2,196
`load_dataset` caches two arrow files?
{ "login": "hwijeen", "id": 29157715, "node_id": "MDQ6VXNlcjI5MTU3NzE1", "avatar_url": "https://avatars.githubusercontent.com/u/29157715?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hwijeen", "html_url": "https://github.com/hwijeen", "followers_url": "https://api.github.com/users/hwijeen/followers", "following_url": "https://api.github.com/users/hwijeen/following{/other_user}", "gists_url": "https://api.github.com/users/hwijeen/gists{/gist_id}", "starred_url": "https://api.github.com/users/hwijeen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hwijeen/subscriptions", "organizations_url": "https://api.github.com/users/hwijeen/orgs", "repos_url": "https://api.github.com/users/hwijeen/repos", "events_url": "https://api.github.com/users/hwijeen/events{/privacy}", "received_events_url": "https://api.github.com/users/hwijeen/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892912, "node_id": "MDU6TGFiZWwxOTM1ODkyOTEy", "url": "https://api.github.com/repos/huggingface/datasets/labels/question", "name": "question", "color": "d876e3", "default": true, "description": "Further information is requested" } ]
closed
false
null
[]
null
[ "Hi ! Files that starts with `cache-*` are cached computation files, i.e. they are the cached results of map/filter/cast/etc. operations. For example if you used `map` on your dataset to transform it, then the resulting dataset is going to be stored and cached in a `cache-*` file. These files are used to avoid having to load the dataset in RAM, even after many transforms", "Thanks @lhoestq! Hmm.. that's strange because I specifically turned off auto caching, and saved mapped result, using `save_to_disk`, to another location. At this location, the following file is created:`355G\tcache-ed205e500a7dc44c.arrow`\r\n\r\nTo my observation, both `load_dataset` and `map` creates `cache-*` files, and I wonder what the `cache-*` file from `load_dataset` is for (as I believe the same information is stored in `json-train.arrow`.", "This is a wrong report -- `cache-*` files are created only my `map`, not by `load_dataset`. " ]
2021-04-09T03:49:19
2021-04-12T05:25:29
2021-04-12T05:25:29
NONE
null
Hi, I am using datasets to load large json file of 587G. I checked the cached folder and found that there are two arrow files created: * `cache-ed205e500a7dc44c.arrow` - 355G * `json-train.arrow` - 582G Why is the first file created? If I delete it, would I still be able to `load_from_disk`?
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2196/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2196/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2195
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2195/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2195/comments
https://api.github.com/repos/huggingface/datasets/issues/2195/events
https://github.com/huggingface/datasets/issues/2195
854,070,194
MDU6SXNzdWU4NTQwNzAxOTQ=
2,195
KeyError: '_indices_files' in `arrow_dataset.py`
{ "login": "samsontmr", "id": 15007950, "node_id": "MDQ6VXNlcjE1MDA3OTUw", "avatar_url": "https://avatars.githubusercontent.com/u/15007950?v=4", "gravatar_id": "", "url": "https://api.github.com/users/samsontmr", "html_url": "https://github.com/samsontmr", "followers_url": "https://api.github.com/users/samsontmr/followers", "following_url": "https://api.github.com/users/samsontmr/following{/other_user}", "gists_url": "https://api.github.com/users/samsontmr/gists{/gist_id}", "starred_url": "https://api.github.com/users/samsontmr/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/samsontmr/subscriptions", "organizations_url": "https://api.github.com/users/samsontmr/orgs", "repos_url": "https://api.github.com/users/samsontmr/repos", "events_url": "https://api.github.com/users/samsontmr/events{/privacy}", "received_events_url": "https://api.github.com/users/samsontmr/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Thanks for reporting @samsontmr.\r\n\r\nIt seems a backward compatibility issue...", "Thanks @samsontmr this should be fixed on master now\r\n\r\nFeel free to reopen if you're still having issues" ]
2021-04-09T01:37:12
2021-04-09T09:55:09
2021-04-09T09:54:39
NONE
null
After pulling the latest master, I'm getting a crash when `load_from_disk` tries to load my local dataset. Trace: ``` Traceback (most recent call last): File "load_data.py", line 11, in <module> dataset = load_from_disk(SRC) File "/opt/conda/envs/py38/lib/python3.8/site-packages/datasets/load.py", line 784, in load_from_disk return DatasetDict.load_from_disk(dataset_path, fs, keep_in_memory=keep_in_memory) File "/opt/conda/envs/py38/lib/python3.8/site-packages/datasets/dataset_dict.py", line 692, in load_from_disk dataset_dict[k] = Dataset.load_from_disk(dataset_dict_split_path, fs, keep_in_memory=keep_in_memory) File "/opt/conda/envs/py38/lib/python3.8/site-packages/datasets/arrow_dataset.py", line 634, in load_from_disk if state["_indices_files"]: KeyError: '_indices_files' ``` I believe this is the line causing the error since there may not be a `_indices_files` key in the older versions: https://github.com/huggingface/datasets/blob/b70141e3c5149430951773aaa0155555c5fb3e76/src/datasets/arrow_dataset.py#L634 May I suggest using `state.get()` instead of directly indexing the dictionary? @lhoestq
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2195/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2195/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2194
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2194/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2194/comments
https://api.github.com/repos/huggingface/datasets/issues/2194/events
https://github.com/huggingface/datasets/issues/2194
853,909,452
MDU6SXNzdWU4NTM5MDk0NTI=
2,194
py3.7: TypeError: can't pickle _LazyModule objects
{ "login": "stas00", "id": 10676103, "node_id": "MDQ6VXNlcjEwNjc2MTAz", "avatar_url": "https://avatars.githubusercontent.com/u/10676103?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stas00", "html_url": "https://github.com/stas00", "followers_url": "https://api.github.com/users/stas00/followers", "following_url": "https://api.github.com/users/stas00/following{/other_user}", "gists_url": "https://api.github.com/users/stas00/gists{/gist_id}", "starred_url": "https://api.github.com/users/stas00/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stas00/subscriptions", "organizations_url": "https://api.github.com/users/stas00/orgs", "repos_url": "https://api.github.com/users/stas00/repos", "events_url": "https://api.github.com/users/stas00/events{/privacy}", "received_events_url": "https://api.github.com/users/stas00/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "\r\nThis wasn't a `datasets` problem, but `transformers`' and it was solved here https://github.com/huggingface/transformers/pull/11168\r\n" ]
2021-04-08T21:02:48
2021-04-09T16:56:50
2021-04-09T01:52:57
MEMBER
null
While this works fine with py3.8, under py3.7, with a totally new conda env and transformers install: ``` git clone https://github.com/huggingface/transformers cd transformers pip install -e .[testing] export BS=1; rm -rf /tmp/test-clm; PYTHONPATH=src USE_TF=0 CUDA_VISIBLE_DEVICES=0 python \ examples/language-modeling/run_clm.py --model_name_or_path distilgpt2 --dataset_name wikitext \ --dataset_config_name wikitext-2-raw-v1 --do_train --max_train_samples 1 \ --per_device_train_batch_size $BS --output_dir /tmp/test-clm --block_size 128 --logging_steps 1 \ --fp16 ``` ``` Traceback (most recent call last): File "examples/language-modeling/run_clm.py", line 453, in <module> main() File "examples/language-modeling/run_clm.py", line 336, in main load_from_cache_file=not data_args.overwrite_cache, File "/home/stas/anaconda3/lib/python3.7/site-packages/datasets/dataset_dict.py", line 303, in map for k, dataset in self.items() File "/home/stas/anaconda3/lib/python3.7/site-packages/datasets/dataset_dict.py", line 303, in <dictcomp> for k, dataset in self.items() File "/home/stas/anaconda3/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 1259, in map update_data=update_data, File "/home/stas/anaconda3/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 157, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) File "/home/stas/anaconda3/lib/python3.7/site-packages/datasets/fingerprint.py", line 158, in wrapper self._fingerprint, transform, kwargs_for_fingerprint File "/home/stas/anaconda3/lib/python3.7/site-packages/datasets/fingerprint.py", line 105, in update_fingerprint hasher.update(transform_args[key]) File "/home/stas/anaconda3/lib/python3.7/site-packages/datasets/fingerprint.py", line 57, in update self.m.update(self.hash(value).encode("utf-8")) File "/home/stas/anaconda3/lib/python3.7/site-packages/datasets/fingerprint.py", line 53, in hash return cls.hash_default(value) File "/home/stas/anaconda3/lib/python3.7/site-packages/datasets/fingerprint.py", line 46, in hash_default return cls.hash_bytes(dumps(value)) File "/home/stas/anaconda3/lib/python3.7/site-packages/datasets/utils/py_utils.py", line 389, in dumps dump(obj, file) File "/home/stas/anaconda3/lib/python3.7/site-packages/datasets/utils/py_utils.py", line 361, in dump Pickler(file, recurse=True).dump(obj) File "/home/stas/anaconda3/lib/python3.7/site-packages/dill/_dill.py", line 454, in dump StockPickler.dump(self, obj) File "/home/stas/anaconda3/lib/python3.7/pickle.py", line 437, in dump self.save(obj) File "/home/stas/anaconda3/lib/python3.7/pickle.py", line 504, in save f(self, obj) # Call unbound method with explicit self File "/home/stas/anaconda3/lib/python3.7/site-packages/datasets/utils/py_utils.py", line 556, in save_function obj=obj, File "/home/stas/anaconda3/lib/python3.7/pickle.py", line 638, in save_reduce save(args) File "/home/stas/anaconda3/lib/python3.7/pickle.py", line 504, in save f(self, obj) # Call unbound method with explicit self File "/home/stas/anaconda3/lib/python3.7/pickle.py", line 789, in save_tuple save(element) File "/home/stas/anaconda3/lib/python3.7/pickle.py", line 504, in save f(self, obj) # Call unbound method with explicit self File "/home/stas/anaconda3/lib/python3.7/site-packages/dill/_dill.py", line 941, in save_module_dict StockPickler.save_dict(pickler, obj) File "/home/stas/anaconda3/lib/python3.7/pickle.py", line 859, in save_dict self._batch_setitems(obj.items()) File "/home/stas/anaconda3/lib/python3.7/pickle.py", line 885, in _batch_setitems save(v) File "/home/stas/anaconda3/lib/python3.7/pickle.py", line 524, in save rv = reduce(self.proto) TypeError: can't pickle _LazyModule objects ``` ``` $ python --version Python 3.7.4 $ python -m torch.utils.collect_env Collecting environment information... PyTorch version: 1.8.0.dev20210110+cu110 Is debug build: False CUDA used to build PyTorch: 11.0 ROCM used to build PyTorch: N/A OS: Ubuntu 20.04.2 LTS (x86_64) GCC version: (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0 Clang version: 10.0.0-4ubuntu1 CMake version: version 3.16.3 ``` Thanks.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2194/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2194/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2193
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2193/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2193/comments
https://api.github.com/repos/huggingface/datasets/issues/2193/events
https://github.com/huggingface/datasets/issues/2193
853,725,707
MDU6SXNzdWU4NTM3MjU3MDc=
2,193
Filtering/mapping on one column is very slow
{ "login": "norabelrose", "id": 39116809, "node_id": "MDQ6VXNlcjM5MTE2ODA5", "avatar_url": "https://avatars.githubusercontent.com/u/39116809?v=4", "gravatar_id": "", "url": "https://api.github.com/users/norabelrose", "html_url": "https://github.com/norabelrose", "followers_url": "https://api.github.com/users/norabelrose/followers", "following_url": "https://api.github.com/users/norabelrose/following{/other_user}", "gists_url": "https://api.github.com/users/norabelrose/gists{/gist_id}", "starred_url": "https://api.github.com/users/norabelrose/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/norabelrose/subscriptions", "organizations_url": "https://api.github.com/users/norabelrose/orgs", "repos_url": "https://api.github.com/users/norabelrose/repos", "events_url": "https://api.github.com/users/norabelrose/events{/privacy}", "received_events_url": "https://api.github.com/users/norabelrose/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892912, "node_id": "MDU6TGFiZWwxOTM1ODkyOTEy", "url": "https://api.github.com/repos/huggingface/datasets/labels/question", "name": "question", "color": "d876e3", "default": true, "description": "Further information is requested" } ]
closed
false
null
[]
null
[ "Hi ! Yes we are working on making `filter` significantly faster. You can look at related PRs here: #2060 #2178 \r\n\r\nI think you can expect to have the fast version of `filter` available next week.\r\n\r\nWe'll make it only select one column, and we'll also make the overall filtering operation way faster by avoiding many arrow<->python conversions especially during writing.\r\n\r\nI'll let you know how it goes !", "@lhoestq Thanks for the response— it's great to hear that we'll be getting a much faster `filter` method soon. However, my use case does also involve using `map` over a single column in order to pre-compute roughly uniformly sized batches, and right now that is also very slow. Is there any plan to make `map` faster for single column operations?\r\n\r\nIf that's not a priority for the maintainers right now, I could try my hand at adding the feature, but I can't guarantee I would do a good job given my lack of familiarity with pyarrow.", "Currently the optimal setup for single-column computations is probably to do something like\r\n```python\r\nresult = dataset.map(f, input_columns=\"my_col\", remove_columns=dataset.column_names)\r\n```\r\nThis has two advantages:\r\n- input_columns=\"my_col\" allows to only read the column \"my_col\"\r\n- remove_columns=dataset.column_names makes `map` only keep the output of your function `f`, and it drops the other columns of the dataset instead of keeping them.\r\n\r\nLet me know if it improves speed on your side.\r\n\r\nYou can also get more speed by using `batched=True` and setting `num_proc=` for multiprocessing", "Hi @lhoestq ,\r\n\r\nI'm hijacking this issue, because I'm currently trying to do the approach you recommend:\r\n\r\n> Currently the optimal setup for single-column computations is probably to do something like\r\n> \r\n> ```python\r\n> result = dataset.map(f, input_columns=\"my_col\", remove_columns=dataset.column_names)\r\n> ```\r\n\r\nHere is my code: (see edit, in which I added a simplified version\r\n\r\n```\r\nThis is the error:\r\n```bash\r\npyarrow.lib.ArrowInvalid: Column 1 named tokens expected length 8964 but got length 1000\r\n```\r\nI wonder why this error occurs, when I delete every column? Can you give me a hint?\r\n\r\n### Edit:\r\nI preprocessed my dataset before (using map with the features argument) and saved it to disk. May this be part of the error? I can iterate over the\r\ncomplete dataset and print every sample before calling map. There seems to be no other problem with the dataset.\r\n\r\nI tried to simplify the code that crashes:\r\n\r\n```python\r\n# works\r\nlog.debug(dataset.column_names)\r\nlog.debug(dataset)\r\nfor i, sample in enumerate(dataset):\r\n log.debug(i, sample)\r\n\r\n# crashes\r\ncounted_dataset = dataset.map(\r\n lambda x: {\"a\": list(range(20))},\r\n input_columns=column,\r\n remove_columns=dataset.column_names,\r\n load_from_cache_file=False,\r\n num_proc=num_workers,\r\n batched=True,\r\n)\r\n```\r\n\r\n```\r\npyarrow.lib.ArrowInvalid: Column 1 named tokens expected length 20 but got length 1000\r\n```\r\n\r\nEdit2: \r\n\r\nMay this be a problem with a schema I set when preprocessing the dataset before? I tried to add the `features` argument to the function and then I get a new error:\r\n\r\n```python\r\n# crashes\r\ncounted_dataset = dataset.map(\r\n lambda x: {\"a\": list(range(20))},\r\n input_columns=column,\r\n remove_columns=dataset.column_names,\r\n load_from_cache_file=False,\r\n num_proc=num_workers,\r\n batched=True,\r\n features=datasets.Features(\r\n {\r\n \"a\": datasets.Sequence(datasets.Value(\"int32\"))\r\n }\r\n )\r\n)\r\n```\r\n\r\n```\r\n File \"env/lib/python3.8/site-packages/datasets/arrow_dataset.py\", line 1704, in _map_single\r\n writer.write_batch(batch)\r\n File \"env/lib/python3.8/site-packages/datasets/arrow_writer.py\", line 312, in write_batch\r\n col_type = schema.field(col).type if schema is not None else None\r\n File \"pyarrow/types.pxi\", line 1341, in pyarrow.lib.Schema.field\r\nKeyError: 'Column tokens does not exist in schema'\r\n```", "Hi ! Can you open a separate issue for that ?\r\nAlso if you could provide a google colab or a sample code to reproduce this issue that would be helpful.\r\nOn my side I was not able to reproduce this error.", "@lhoestq Sorry I'm just responding now. I'm currently using your recommendation for the map on a single column, and I've gotten it to be fast enough to sort of work for my use case by just setting `num_proc=10`, although it's still quite slow. It's clear that it is still loading the entirety of each row into memory and then discarding everything except the selected column, instead of exploiting the columnar data format to only load the selected column.\r\n\r\nMy code is like this:\r\n```\r\n self.dataset = self.dataset.sort('num_tokens')\r\n batch_dataset = self.dataset.map(\r\n\tcompute_uniform_sized_batches,\r\n\tbatched=True, batch_size=10_000, num_proc=10, input_columns=['num_tokens'],\r\n\tremove_columns=get_columns_all_equal(self.dataset),\r\n\twith_indices=True,\r\n\tfn_kwargs=dict(max_size=tokens_per_batch)\r\n)\r\nself.batches = {\r\n\tname: list(zip(split['start'], split['length']))\r\n\tfor name, split in batch_dataset.items()\r\n}\r\n```\r\nI find that the processes with higher IDs take significantly longer to complete, presumably because the dataset is sorted by article length and they're loading the entire article text into memory, instead of just the 'num_tokens' column.\r\n\r\nI should note that my batching procedure would work best if I just used `batch_size=None` and loaded the whole column into memory at once, but I found that this was intolerably slow and gave me no progress information, so I'm using the less than ideal `batch_size=10_000`.", "Hi @norabelrose ! I'm glad you managed to make this work on your side.\r\nRegarding memory usage, you can try to drop the columns that you don't want to use for your `map` for now.\r\n\r\nIn the future we'll try to find a way to not load unnecessary columns in memory in `map`. Currently the way it works is that it gets the batch as a python dict, then it updates it using the output of your mapping function, and finally it removes columns from `remove_columns`. Therefore for a moment some columns are loaded in memory even if you remove them or don't use them for your mapping function.\r\n\r\nIt would be nice to have a way to optimize memory for cases such as yours !", "@lhoestq After looking through the source code, it looks like the following solution has at least some chance of working:\r\n- refactor `Dataset.map()` so that the `input_columns` parameter is implemented by using the `self.formatted_as()` context manager with `columns=input_columns`\r\n- change `Dataset._getitem()` so that it passes `self._data.drop(drop_columns)` to the `query_table()` function whenever `format_columns` is non-None and `output_all_columns` is False, instead of `self._data` itself", "Looks like a great direction :)\r\nNote that `query_table` doesn't bring data into memory. Only `format_table` does.\r\nAlso the dataset may already have a format with `columns=` already defined so we would need to define the formatted `input_dataset` like:\r\n```python\r\n# before the `map` main for loop\r\ninput_columns = input_columns if input_columns is not None else self.column_names\r\nif not self._output_all_columns:\r\n columns = [col for col in input_columns if self._format_columns is None or col in self._format_columns]\r\n input_dataset = self.with_format(\r\n type=self._format_type,\r\n columns=columns\r\n )\r\nelse:\r\n # in this case we could find a way to filter both format_columns and unformatted columns eventually\r\n input_dataset = self\r\n# then input_dataset can be used in the main for loop of `map`\r\n```\r\n\r\nEDIT: oh and regarding streaming format versus file format for arrow, we plan to start using the file format #1933 at one point (though I'm not sure if it would improve performance)", "Good to know about `query_table` not bringing anything into memory. I was under the impression that it did because a while back I looked at my `map` operation in pdb and it looked like it was spending forever in line 93 of formatting.py, `return pa.concat_tables(....)`, although that was before the `fast_slice` interpolation search was implemented, so it may have had more to do with the slow ChunkedArray slice implementation than anything else.\r\n\r\nIf `query_table` is I/O free then the fix may be as simple as just adding this to line 1779 of arrow_dataset.py:\r\n```python\r\n# Only load the columns we actually need\r\nif input_columns:\r\n stack.enter_context(self.formatted_as(\r\n self._format_type,\r\n columns=input_columns,\r\n output_all_columns=False,\r\n **self._format_kwargs\r\n ))\r\n```\r\nIt's not clear to me why the `[col for col in input_columns if self._format_columns is None or col in self._format_columns]` check would be necessary— it seems like either `input_columns` should simply temporarily override the `_format_columns` within the `map` operation, or we should throw an error if there are any conflicts. Currently it doesn't look like this case is checked for at all within `map`, but maybe I'm just missing it.", "`query_table` simply slices/concatenates parts of the table. The actual data inside the table is not brought in memory.\r\nAlso I'm more in favor of declaring `input_dataset = self.with_format(...)` since `formatted_as` may update the dataset fingerprint of `self`, which is not expected when someone runs `map`.\r\n\r\n> It's not clear to me why the [col for col in input_columns if self._format_columns is None or col in self._format_columns] check would be necessary— it seems like either input_columns should simply temporarily override the _format_columns within the map operation, or we should throw an error if there are any conflicts. Currently it doesn't look like this case is checked for at all within map, but maybe I'm just missing it.\r\n\r\nActually yes we can just use input_columns. And we do need to add a check to make sure there are not conflicts or this could lead to confusing errors.", "That sounds good to me! I just submitted a PR (#2246) implementing your approach. I also changed how `_query_table` handles Iterable keys since it still seemed like `pa.concat_tables` was taking a long time to create the table for each batch. Now my whole `map()` operation takes 1 min 46 seconds where it used to take somewhere on the order of 10 minutes." ]
2021-04-08T18:16:14
2021-04-26T16:13:59
2021-04-26T16:13:59
CONTRIBUTOR
null
I'm currently using the `wikipedia` dataset— I'm tokenizing the articles with the `tokenizers` library using `map()` and also adding a new `num_tokens` column to the dataset as part of that map operation. I want to be able to _filter_ the dataset based on this `num_tokens` column, but even when I specify `input_columns=['num_tokens']`, it seems that the entirety of each row is loaded into memory, which makes the operation take much longer than it should. Indeed, `filter` currently just calls `map`, and I found that in `_map_single` on lines 1690-1704 of `arrow_dataset.py`, the method is just grabbing slices of _all the rows_ of the dataset and then passing only the specified columns to the map function. It seems that, when the user passes a value for `input_columns`, the `map` function should create a temporary pyarrow table by selecting just those columns, and then get slices from that table. Or something like that— I'm not very familiar with the pyarrow API. I know that in the meantime I can sort of get around this by simply only returning the rows that match my filter criterion from the tokenizing function I pass to `map()`, but I actually _also_ want to map on just the `num_tokens` column in order to compute batches with a roughly uniform number of tokens per batch. I would also ideally like to be able to change my minimum and maximum article lengths without having to re-tokenize the entire dataset. PS: This is definitely not a "dataset request." I'm realizing that I don't actually know how to remove labels from my own issues on other people's repos, if that is even possible.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2193/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2193/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2192
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2192/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2192/comments
https://api.github.com/repos/huggingface/datasets/issues/2192/events
https://github.com/huggingface/datasets/pull/2192
853,547,910
MDExOlB1bGxSZXF1ZXN0NjExNjE5NTY0
2,192
Fix typo in huggingface hub
{ "login": "LysandreJik", "id": 30755778, "node_id": "MDQ6VXNlcjMwNzU1Nzc4", "avatar_url": "https://avatars.githubusercontent.com/u/30755778?v=4", "gravatar_id": "", "url": "https://api.github.com/users/LysandreJik", "html_url": "https://github.com/LysandreJik", "followers_url": "https://api.github.com/users/LysandreJik/followers", "following_url": "https://api.github.com/users/LysandreJik/following{/other_user}", "gists_url": "https://api.github.com/users/LysandreJik/gists{/gist_id}", "starred_url": "https://api.github.com/users/LysandreJik/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LysandreJik/subscriptions", "organizations_url": "https://api.github.com/users/LysandreJik/orgs", "repos_url": "https://api.github.com/users/LysandreJik/repos", "events_url": "https://api.github.com/users/LysandreJik/events{/privacy}", "received_events_url": "https://api.github.com/users/LysandreJik/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-08T14:42:24
2021-04-08T15:47:41
2021-04-08T15:47:40
MEMBER
null
pip knows how to resolve to `huggingface_hub`, but conda doesn't! The `packaging` dependency is also required for the build to complete.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2192/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2192/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2192", "html_url": "https://github.com/huggingface/datasets/pull/2192", "diff_url": "https://github.com/huggingface/datasets/pull/2192.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2192.patch", "merged_at": "2021-04-08T15:47:40" }
true
https://api.github.com/repos/huggingface/datasets/issues/2191
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2191/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2191/comments
https://api.github.com/repos/huggingface/datasets/issues/2191/events
https://github.com/huggingface/datasets/pull/2191
853,364,204
MDExOlB1bGxSZXF1ZXN0NjExNDY1Nzc0
2,191
Refactorize tests to use Dataset as context manager
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 2851292821, "node_id": "MDU6TGFiZWwyODUxMjkyODIx", "url": "https://api.github.com/repos/huggingface/datasets/labels/refactoring", "name": "refactoring", "color": "B67A40", "default": false, "description": "Restructuring existing code without changing its external behavior" } ]
closed
false
null
[]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/1", "html_url": "https://github.com/huggingface/datasets/milestone/1", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/1/labels", "id": 6644198, "node_id": "MDk6TWlsZXN0b25lNjY0NDE5OA==", "number": 1, "title": "1.6", "description": "Next minor release", "creator": { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, "open_issues": 0, "closed_issues": 4, "state": "closed", "created_at": "2021-04-09T13:07:51", "updated_at": "2021-04-20T16:50:46", "due_on": "2021-04-16T07:00:00", "closed_at": "2021-04-20T16:50:46" }
[]
2021-04-08T11:21:04
2021-04-19T07:53:11
2021-04-19T07:53:10
MEMBER
null
Refactorize Dataset tests to use Dataset as context manager.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2191/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2191/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2191", "html_url": "https://github.com/huggingface/datasets/pull/2191", "diff_url": "https://github.com/huggingface/datasets/pull/2191.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2191.patch", "merged_at": "2021-04-19T07:53:10" }
true
https://api.github.com/repos/huggingface/datasets/issues/2190
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2190/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2190/comments
https://api.github.com/repos/huggingface/datasets/issues/2190/events
https://github.com/huggingface/datasets/issues/2190
853,181,564
MDU6SXNzdWU4NTMxODE1NjQ=
2,190
News_commentary Dataset Translation Pairs are of Incorrect Language Specified Pairs
{ "login": "anassalamah", "id": 8571003, "node_id": "MDQ6VXNlcjg1NzEwMDM=", "avatar_url": "https://avatars.githubusercontent.com/u/8571003?v=4", "gravatar_id": "", "url": "https://api.github.com/users/anassalamah", "html_url": "https://github.com/anassalamah", "followers_url": "https://api.github.com/users/anassalamah/followers", "following_url": "https://api.github.com/users/anassalamah/following{/other_user}", "gists_url": "https://api.github.com/users/anassalamah/gists{/gist_id}", "starred_url": "https://api.github.com/users/anassalamah/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/anassalamah/subscriptions", "organizations_url": "https://api.github.com/users/anassalamah/orgs", "repos_url": "https://api.github.com/users/anassalamah/repos", "events_url": "https://api.github.com/users/anassalamah/events{/privacy}", "received_events_url": "https://api.github.com/users/anassalamah/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi @anassalamah,\r\n\r\nCould you please try with this:\r\n```python\r\ntrain_ds = load_dataset(\"news_commentary\", lang1=\"ar\", lang2=\"en\", split='train[:98%]')\r\nval_ds = load_dataset(\"news_commentary\", lang1=\"ar\", lang2=\"en\", split='train[98%:]')\r\n```", "Hello @albertvillanova, \r\n\r\nThanks for the suggestion. I didn't know you could do that. however, it didn't resolve the issue\r\n\r\n![image](https://user-images.githubusercontent.com/8571003/114169966-ec819400-993a-11eb-8a67-930f9a9b2290.png)\r\n" ]
2021-04-08T07:53:43
2021-05-24T10:03:55
2021-05-24T10:03:55
NONE
null
I used load_dataset to load the news_commentary dataset for "ar-en" translation pairs but found translations from Arabic to Hindi. ``` train_ds = load_dataset("news_commentary", "ar-en", split='train[:98%]') val_ds = load_dataset("news_commentary", "ar-en", split='train[98%:]') # filtering out examples that are not ar-en translations but ar-hi val_ds = val_ds.filter(lambda example, indice: indice not in chain(range(1312,1327) ,range(1384,1399), range(1030,1042)), with_indices=True) ``` * I'm fairly new to using datasets so I might be doing something wrong
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2190/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2190/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2189
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2189/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2189/comments
https://api.github.com/repos/huggingface/datasets/issues/2189/events
https://github.com/huggingface/datasets/issues/2189
853,052,891
MDU6SXNzdWU4NTMwNTI4OTE=
2,189
save_to_disk doesn't work when we use concatenate_datasets function before creating the final dataset_object.
{ "login": "shamanez", "id": 16892570, "node_id": "MDQ6VXNlcjE2ODkyNTcw", "avatar_url": "https://avatars.githubusercontent.com/u/16892570?v=4", "gravatar_id": "", "url": "https://api.github.com/users/shamanez", "html_url": "https://github.com/shamanez", "followers_url": "https://api.github.com/users/shamanez/followers", "following_url": "https://api.github.com/users/shamanez/following{/other_user}", "gists_url": "https://api.github.com/users/shamanez/gists{/gist_id}", "starred_url": "https://api.github.com/users/shamanez/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/shamanez/subscriptions", "organizations_url": "https://api.github.com/users/shamanez/orgs", "repos_url": "https://api.github.com/users/shamanez/repos", "events_url": "https://api.github.com/users/shamanez/events{/privacy}", "received_events_url": "https://api.github.com/users/shamanez/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi ! We refactored save_to_disk in #2025 so this doesn't happen.\r\nFeel free to try it on master for now\r\nWe'll do a new release soon" ]
2021-04-08T04:42:53
2022-06-01T16:32:15
2022-06-01T16:32:15
NONE
null
As you can see, it saves the entire dataset. @lhoestq You can check by going through the following example, ``` from datasets import load_from_disk,concatenate_datasets loaded_data=load_from_disk('/home/gsir059/HNSW-ori/my_knowledge_dataset') n=20 kb_list=[loaded_data.shard(n, i, contiguous=True) for i in range(n)] final_dataset=concatenate_datasets([kb_list[1],kb_list[2]]) final_dataset.save_to_disk('/home/gsir059/haha/k.arrow') ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2189/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2189/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2188
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2188/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2188/comments
https://api.github.com/repos/huggingface/datasets/issues/2188/events
https://github.com/huggingface/datasets/issues/2188
853,044,166
MDU6SXNzdWU4NTMwNDQxNjY=
2,188
Duplicate data in Timit dataset
{ "login": "thanh-p", "id": 78190188, "node_id": "MDQ6VXNlcjc4MTkwMTg4", "avatar_url": "https://avatars.githubusercontent.com/u/78190188?v=4", "gravatar_id": "", "url": "https://api.github.com/users/thanh-p", "html_url": "https://github.com/thanh-p", "followers_url": "https://api.github.com/users/thanh-p/followers", "following_url": "https://api.github.com/users/thanh-p/following{/other_user}", "gists_url": "https://api.github.com/users/thanh-p/gists{/gist_id}", "starred_url": "https://api.github.com/users/thanh-p/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thanh-p/subscriptions", "organizations_url": "https://api.github.com/users/thanh-p/orgs", "repos_url": "https://api.github.com/users/thanh-p/repos", "events_url": "https://api.github.com/users/thanh-p/events{/privacy}", "received_events_url": "https://api.github.com/users/thanh-p/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi ! Thanks for reporting\r\nIf I recall correctly this has been recently fixed #1995\r\nCan you try to upgrade your local version of `datasets` ?\r\n```\r\npip install --upgrade datasets\r\n```", "Hi Ihoestq,\r\n\r\nThank you. It works after upgrading the datasets\r\n" ]
2021-04-08T04:21:54
2021-04-08T12:13:19
2021-04-08T12:13:19
NONE
null
I ran a simple code to list all texts in Timit dataset and the texts were all the same. Is this dataset corrupted? **Code:** timit = load_dataset("timit_asr") print(*timit['train']['text'], sep='\n') **Result:** Would such an act of refusal be useful? Would such an act of refusal be useful? Would such an act of refusal be useful? Would such an act of refusal be useful? ... ... Would such an act of refusal be useful?
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2188/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2188/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2186
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2186/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2186/comments
https://api.github.com/repos/huggingface/datasets/issues/2186/events
https://github.com/huggingface/datasets/pull/2186
852,840,819
MDExOlB1bGxSZXF1ZXN0NjExMDMxNzE0
2,186
GEM: new challenge sets
{ "login": "yjernite", "id": 10469459, "node_id": "MDQ6VXNlcjEwNDY5NDU5", "avatar_url": "https://avatars.githubusercontent.com/u/10469459?v=4", "gravatar_id": "", "url": "https://api.github.com/users/yjernite", "html_url": "https://github.com/yjernite", "followers_url": "https://api.github.com/users/yjernite/followers", "following_url": "https://api.github.com/users/yjernite/following{/other_user}", "gists_url": "https://api.github.com/users/yjernite/gists{/gist_id}", "starred_url": "https://api.github.com/users/yjernite/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yjernite/subscriptions", "organizations_url": "https://api.github.com/users/yjernite/orgs", "repos_url": "https://api.github.com/users/yjernite/repos", "events_url": "https://api.github.com/users/yjernite/events{/privacy}", "received_events_url": "https://api.github.com/users/yjernite/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-07T21:39:07
2021-04-07T21:56:35
2021-04-07T21:56:35
MEMBER
null
This PR updates the GEM dataset to: - remove extraneous fields in WikiAuto after https://github.com/huggingface/datasets/pull/2171 fixed the source - add context and services to Schema Guided Dialog - Add new or update challenge sets for MLSUM ES and DE, XSUM, and SGD
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2186/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 1, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2186/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2186", "html_url": "https://github.com/huggingface/datasets/pull/2186", "diff_url": "https://github.com/huggingface/datasets/pull/2186.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2186.patch", "merged_at": "2021-04-07T21:56:35" }
true
https://api.github.com/repos/huggingface/datasets/issues/2185
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2185/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2185/comments
https://api.github.com/repos/huggingface/datasets/issues/2185/events
https://github.com/huggingface/datasets/issues/2185
852,684,395
MDU6SXNzdWU4NTI2ODQzOTU=
2,185
.map() and distributed training
{ "login": "VictorSanh", "id": 16107619, "node_id": "MDQ6VXNlcjE2MTA3NjE5", "avatar_url": "https://avatars.githubusercontent.com/u/16107619?v=4", "gravatar_id": "", "url": "https://api.github.com/users/VictorSanh", "html_url": "https://github.com/VictorSanh", "followers_url": "https://api.github.com/users/VictorSanh/followers", "following_url": "https://api.github.com/users/VictorSanh/following{/other_user}", "gists_url": "https://api.github.com/users/VictorSanh/gists{/gist_id}", "starred_url": "https://api.github.com/users/VictorSanh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/VictorSanh/subscriptions", "organizations_url": "https://api.github.com/users/VictorSanh/orgs", "repos_url": "https://api.github.com/users/VictorSanh/repos", "events_url": "https://api.github.com/users/VictorSanh/events{/privacy}", "received_events_url": "https://api.github.com/users/VictorSanh/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi, one workaround would be to save the mapped(tokenized in your case) file using `save_to_disk`, and having each process load this file using `load_from_disk`. This is what I am doing, and in this case, I turn off the ability to automatically load from the cache.\r\n\r\nAlso, multiprocessing the map function seems to be slower at the moment (#1992), hope this helps you.", "Thanks @hwijeen for the workaround, feels a bit prototypical but it works! (it seems files are written twice then though)\r\n\r\n(I haven't observed slowness using multiprocessed map function but I could be wrong)", "To my understanding, files are written twice anyhow(one after load_dataset, another aftet map). It's just that you now have it at the location where you can see, whereas it was secretlely saved at caching folder(.cache/huggingface/datasets by default)! Correct me if I'm wrong!", "Slowness in multiprocessing has been observed in certain environments but not others. We're investigating ;)", "So to answer my initial question, I was just doing something stupid as I was not re-giving the `preprocessing_num_workers` arguments when launching the distributed training (and it was then set to `None`). I initially thought the hash was computed only with the `tokenize_function` but it's all arguments. Thanks @lhoestq for clarifying!", "This cache process isn't really consistent. I just changed `per_device_train_batch_size` of training script and now it rebuilding the dataset cache!!!! Why?", "Hi ! A `map` function is recomputed if the code changes or if any of the variables it uses changes. Can you check that your function doesn't use `per_device_train_batch_size` or any variable that contains `per_device_train_batch_size` ?", "My code is actually a transformer's example for training t5, I modified a bit:\r\n\r\nhttps://github.com/puraminy/transformers/blob/4b40877132eedb566043f83de8f1d29a84d71430/examples/flax/language-modeling/run_t5_mlm_flax.py#L614\r\n\r\nNo, it doesn't use `per_device_train_batch_size`. I remember it worked for several times and then for no reason or various reasons like the above it started to build the cache again, as if it had an expiration date (maybe), or maybe I had changed the code! \r\n\r\nSo, to get rid of these problems I saved cache with a name (was forced to not use multiple_processes, because otherwise it generates multiple files) and then I load it from this cache file. " ]
2021-04-07T18:22:14
2021-10-23T07:11:15
2021-04-09T15:38:31
MEMBER
null
Hi, I have a question regarding distributed training and the `.map` call on a dataset. I have a local dataset "my_custom_dataset" that I am loading with `datasets = load_from_disk(dataset_path=my_path)`. `dataset` is then tokenized: ```python datasets = load_from_disk(dataset_path=my_path) [...] def tokenize_function(examples): return tokenizer(examples[text_column_name]) logger.info("Mapping dataset to tokenized dataset.") tokenized_datasets = datasets.map( tokenize_function, batched=True, num_proc=preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=True, ) ``` I am using 31 workers (`preprocessing_num_workers=31`) and thus it creates 31 `cache*.arrow` files in `my_path/train` (there is only a train split). When I relaunch the script, the map is tokenization is skipped in favor of loading the 31 previously cached files, and that's perfect. Everything so far was done by launching a **single process script**. I now launch the same training script in **distributed mode** (`pytorch -m torch.distributed.launch --nproc_per_node 2`). However, once it reaches the map call, it re-does the tokenization... instead of loading the 31 cached files. I tried adding the `cache_file_name` argument: `cache_file_name={"train": my_path/one_of_the_arrow_file}`, but I can't give the 31 cached files, so it probably isn't the right way to do it. **My question: what is the best way to load cached files if they were pre-processed and dumped in multiple arrow files?** It seems automatically handled for single processes but fails on distributed training. - I am following the same structure as the examples of transformers (more specifically [run_clm.py](https://github.com/huggingface/transformers/blob/master/examples/language-modeling/run_clm.py) in my case) - I am using 1.5.0 version of datasets if that matters.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2185/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2185/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2184
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2184/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2184/comments
https://api.github.com/repos/huggingface/datasets/issues/2184/events
https://github.com/huggingface/datasets/pull/2184
852,597,258
MDExOlB1bGxSZXF1ZXN0NjEwODIxMTc0
2,184
Implementation of class_encode_column
{ "login": "SBrandeis", "id": 33657802, "node_id": "MDQ6VXNlcjMzNjU3ODAy", "avatar_url": "https://avatars.githubusercontent.com/u/33657802?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SBrandeis", "html_url": "https://github.com/SBrandeis", "followers_url": "https://api.github.com/users/SBrandeis/followers", "following_url": "https://api.github.com/users/SBrandeis/following{/other_user}", "gists_url": "https://api.github.com/users/SBrandeis/gists{/gist_id}", "starred_url": "https://api.github.com/users/SBrandeis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SBrandeis/subscriptions", "organizations_url": "https://api.github.com/users/SBrandeis/orgs", "repos_url": "https://api.github.com/users/SBrandeis/repos", "events_url": "https://api.github.com/users/SBrandeis/events{/privacy}", "received_events_url": "https://api.github.com/users/SBrandeis/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-07T16:47:43
2021-04-16T11:44:37
2021-04-16T11:26:59
CONTRIBUTOR
null
Addresses #2176 I'm happy to discuss the API and internals!
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2184/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2184/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2184", "html_url": "https://github.com/huggingface/datasets/pull/2184", "diff_url": "https://github.com/huggingface/datasets/pull/2184.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2184.patch", "merged_at": "2021-04-16T11:26:59" }
true
https://api.github.com/repos/huggingface/datasets/issues/2183
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2183/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2183/comments
https://api.github.com/repos/huggingface/datasets/issues/2183/events
https://github.com/huggingface/datasets/pull/2183
852,518,411
MDExOlB1bGxSZXF1ZXN0NjEwNzU3MjUz
2,183
Fix s3fs tests for py36 and py37+
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2021-04-07T15:17:11
2021-04-08T08:54:45
2021-04-08T08:54:44
MEMBER
null
Recently several changes happened: 1. latest versions of `fsspec` require python>3.7 for async features 2. `s3fs` added a dependency on `aiobotocore`, which is not compatible with the `moto` s3 mock context manager This PR fixes both issues, by pinning `fsspec` and `s3fs` for python 3.6, and by using `moto` in server mode to support running the tests on python>=3.7 with the latest version of `fsspec` and `s3fs`. cc @philschmid
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2183/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2183/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2183", "html_url": "https://github.com/huggingface/datasets/pull/2183", "diff_url": "https://github.com/huggingface/datasets/pull/2183.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2183.patch", "merged_at": "2021-04-08T08:54:44" }
true