Dataset schema (column: dtype, observed length range / value classes):
url: stringlengths 58-61
repository_url: stringclasses, 1 value
labels_url: stringlengths 72-75
comments_url: stringlengths 67-70
events_url: stringlengths 65-68
html_url: stringlengths 46-51
id: int64, 599M-1.62B
node_id: stringlengths 18-32
number: int64, 1-5.62k
title: stringlengths 1-290
user: dict
labels: list
state: stringclasses, 1 value
locked: bool, 1 class
assignee: dict
assignees: list
milestone: dict
comments: sequence
created_at: unknown
updated_at: unknown
closed_at: unknown
author_association: stringclasses, 3 values
active_lock_reason: null
body: stringlengths 0-228k
reactions: dict
timeline_url: stringlengths 67-70
performed_via_github_app: null
state_reason: stringclasses, 2 values
draft: bool, 2 classes
pull_request: dict
is_pull_request: bool, 2 classes
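The rows below are flat per-issue records, one field value per line, in the column order listed above. A minimal sketch of how an export with this schema might be loaded and inspected with the `datasets` library; the Hub repo id is a hypothetical placeholder:

```python
from datasets import load_dataset

# Hypothetical repo id; substitute the dataset this export actually comes from.
issues = load_dataset("user/github-issues-export", split="train")

print(issues.column_names)                      # should match the columns listed above
print(issues[0]["title"], issues[0]["number"])  # first record, e.g. a PR title and number

pull_requests = issues.filter(lambda row: row["is_pull_request"])  # keep only PRs
print(len(pull_requests))
```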
https://api.github.com/repos/huggingface/datasets/issues/5446
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5446/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5446/comments
https://api.github.com/repos/huggingface/datasets/issues/5446/events
https://github.com/huggingface/datasets/pull/5446
1,550,591,588
PR_kwDODunzps5IMyka
5,446
test v0.12.0.rc0
{ "login": "Wauplin", "id": 11801849, "node_id": "MDQ6VXNlcjExODAxODQ5", "avatar_url": "https://avatars.githubusercontent.com/u/11801849?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Wauplin", "html_url": "https://github.com/Wauplin", "followers_url": "https://api.github.com/users/Wauplin/followers", "following_url": "https://api.github.com/users/Wauplin/following{/other_user}", "gists_url": "https://api.github.com/users/Wauplin/gists{/gist_id}", "starred_url": "https://api.github.com/users/Wauplin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Wauplin/subscriptions", "organizations_url": "https://api.github.com/users/Wauplin/orgs", "repos_url": "https://api.github.com/users/Wauplin/repos", "events_url": "https://api.github.com/users/Wauplin/events{/privacy}", "received_events_url": "https://api.github.com/users/Wauplin/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2023-01-20T10:05:19"
"2023-01-20T10:43:22"
"2023-01-20T10:13:48"
CONTRIBUTOR
null
DO NOT MERGE. Only to test the CI. cc @lhoestq @albertvillanova
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5446/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5446/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5446", "html_url": "https://github.com/huggingface/datasets/pull/5446", "diff_url": "https://github.com/huggingface/datasets/pull/5446.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5446.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/5445
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5445/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5445/comments
https://api.github.com/repos/huggingface/datasets/issues/5445/events
https://github.com/huggingface/datasets/issues/5445
1,550,588,703
I_kwDODunzps5cbBsf
5,445
CI tests are broken: AttributeError: 'mappingproxy' object has no attribute 'target'
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[]
"2023-01-20T10:03:10"
"2023-01-20T10:28:44"
"2023-01-20T10:28:44"
MEMBER
null
CI tests are broken, raising `AttributeError: 'mappingproxy' object has no attribute 'target'`. See: https://github.com/huggingface/datasets/actions/runs/3966497597/jobs/6797384185 ``` ... ERROR tests/test_streaming_download_manager.py::TestxPath::test_xpath_rglob[mock://top_level-date=2019-10-0[1-4]/*-expected_paths4] - AttributeError: 'mappingproxy' object has no attribute 'target' ===== 2076 passed, 19 skipped, 15 warnings, 47 errors in 115.54s (0:01:55) ===== ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5445/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5445/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5443
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5443/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5443/comments
https://api.github.com/repos/huggingface/datasets/issues/5443/events
https://github.com/huggingface/datasets/pull/5443
1,550,178,914
PR_kwDODunzps5ILbk8
5,443
Update share tutorial
{ "login": "stevhliu", "id": 59462357, "node_id": "MDQ6VXNlcjU5NDYyMzU3", "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stevhliu", "html_url": "https://github.com/stevhliu", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "repos_url": "https://api.github.com/users/stevhliu/repos", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2023-01-20T01:09:14"
"2023-01-20T15:44:45"
"2023-01-20T15:37:30"
MEMBER
null
Based on feedback from discussion #5423, this PR updates the sharing tutorial with a mention of writing your own dataset loading script to support more advanced dataset creation options like multiple configs. I'll open a separate PR to update the *Create a Dataset card* with the new Hub metadata UI update 😄
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5443/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5443/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5443", "html_url": "https://github.com/huggingface/datasets/pull/5443", "diff_url": "https://github.com/huggingface/datasets/pull/5443.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5443.patch", "merged_at": "2023-01-20T15:37:30" }
true
https://api.github.com/repos/huggingface/datasets/issues/5442
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5442/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5442/comments
https://api.github.com/repos/huggingface/datasets/issues/5442/events
https://github.com/huggingface/datasets/issues/5442
1,550,084,450
I_kwDODunzps5cZGli
5,442
OneDrive Integrations with HF Datasets
{ "login": "Mohammed20201991", "id": 59222637, "node_id": "MDQ6VXNlcjU5MjIyNjM3", "avatar_url": "https://avatars.githubusercontent.com/u/59222637?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Mohammed20201991", "html_url": "https://github.com/Mohammed20201991", "followers_url": "https://api.github.com/users/Mohammed20201991/followers", "following_url": "https://api.github.com/users/Mohammed20201991/following{/other_user}", "gists_url": "https://api.github.com/users/Mohammed20201991/gists{/gist_id}", "starred_url": "https://api.github.com/users/Mohammed20201991/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Mohammed20201991/subscriptions", "organizations_url": "https://api.github.com/users/Mohammed20201991/orgs", "repos_url": "https://api.github.com/users/Mohammed20201991/repos", "events_url": "https://api.github.com/users/Mohammed20201991/events{/privacy}", "received_events_url": "https://api.github.com/users/Mohammed20201991/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
null
[]
null
[ "Hi! \r\n\r\nWe use [`fsspec`](https://github.com/fsspec/filesystem_spec) to integrate with storage providers. You can find more info (and the usage examples) in [our docs](https://huggingface.co/docs/datasets/v2.8.0/filesystems#download-and-prepare-a-dataset-into-a-cloud-storage).\r\n\r\n[`gdrivefs`](https://github.com/fsspec/gdrivefs) makes it possible to use Google Drive as a storage service in Datasets, but this is not the case for OneDrive, since its[ Python SDK](https://github.com/OneDrive/onedrive-sdk-python) is not integrated with `fsspec`. Can you please request the integration with `fsspec` in their repo to address this limitation?", "I'm closing this issue as implementing a fsspec-compliant OneDrive filesystem is not our responsibility." ]
"2023-01-19T23:12:08"
"2023-02-24T16:17:51"
"2023-02-24T16:17:51"
NONE
null
### Feature request First of all, I would like to thank the whole community that developed the dataset storage and made it freely available. How can we integrate a OneDrive account, or any other cloud storage (like Google Drive, ...), with the **HF** datasets section? For example, if I have **50GB** on my **OneDrive** account, I want to move data between the drive and a Hugging Face repo, or vice versa. ### Motivation Make the dataset section more flexible with other storage options, similar to the integration between Google Colab and Google Drive. ### Your contribution Can be done using the Hugging Face CLI
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5442/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5442/timeline
null
completed
null
null
false
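The fsspec-based workflow mentioned in the maintainers' reply above looks roughly like the following; this is a sketch based on the linked docs, assuming an S3 bucket with s3fs installed (OneDrive itself, as noted, has no fsspec implementation yet), with placeholder credentials and bucket name:

```python
from datasets import load_dataset_builder

# Placeholder credentials for the fsspec filesystem (here s3fs); any
# fsspec-compatible backend (gcsfs, gdrivefs, ...) works the same way.
storage_options = {"key": "<aws_access_key_id>", "secret": "<aws_secret_access_key>"}

builder = load_dataset_builder("imdb")
# Download and prepare the dataset directly into cloud storage instead of the local cache.
builder.download_and_prepare(
    "s3://my-bucket/imdb",            # hypothetical bucket path
    storage_options=storage_options,
    file_format="parquet",
)
```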
https://api.github.com/repos/huggingface/datasets/issues/5440
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5440/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5440/comments
https://api.github.com/repos/huggingface/datasets/issues/5440/events
https://github.com/huggingface/datasets/pull/5440
1,538,361,143
PR_kwDODunzps5HpRbF
5,440
Fix documentation about batch samplers
{ "login": "thomasw21", "id": 24695242, "node_id": "MDQ6VXNlcjI0Njk1MjQy", "avatar_url": "https://avatars.githubusercontent.com/u/24695242?v=4", "gravatar_id": "", "url": "https://api.github.com/users/thomasw21", "html_url": "https://github.com/thomasw21", "followers_url": "https://api.github.com/users/thomasw21/followers", "following_url": "https://api.github.com/users/thomasw21/following{/other_user}", "gists_url": "https://api.github.com/users/thomasw21/gists{/gist_id}", "starred_url": "https://api.github.com/users/thomasw21/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomasw21/subscriptions", "organizations_url": "https://api.github.com/users/thomasw21/orgs", "repos_url": "https://api.github.com/users/thomasw21/repos", "events_url": "https://api.github.com/users/thomasw21/events{/privacy}", "received_events_url": "https://api.github.com/users/thomasw21/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2023-01-18T17:04:27"
"2023-01-18T17:57:29"
"2023-01-18T17:50:04"
MEMBER
null
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5440/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5440/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5440", "html_url": "https://github.com/huggingface/datasets/pull/5440", "diff_url": "https://github.com/huggingface/datasets/pull/5440.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5440.patch", "merged_at": "2023-01-18T17:50:04" }
true
https://api.github.com/repos/huggingface/datasets/issues/5438
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5438/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5438/comments
https://api.github.com/repos/huggingface/datasets/issues/5438/events
https://github.com/huggingface/datasets/pull/5438
1,537,489,730
PR_kwDODunzps5HmWA8
5,438
Update actions/checkout in CD Conda release
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2023-01-18T06:53:15"
"2023-01-18T13:49:51"
"2023-01-18T13:42:49"
MEMBER
null
This PR updates the "checkout" GitHub Action to its latest version, as previous ones are deprecated: https://github.blog/changelog/2022-09-22-github-actions-all-actions-will-begin-running-on-node16-instead-of-node12/
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5438/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5438/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5438", "html_url": "https://github.com/huggingface/datasets/pull/5438", "diff_url": "https://github.com/huggingface/datasets/pull/5438.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5438.patch", "merged_at": "2023-01-18T13:42:48" }
true
https://api.github.com/repos/huggingface/datasets/issues/5437
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5437/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5437/comments
https://api.github.com/repos/huggingface/datasets/issues/5437/events
https://github.com/huggingface/datasets/issues/5437
1,536,837,144
I_kwDODunzps5bmkYY
5,437
Can't load png dataset with 4 channel (RGBA)
{ "login": "WiNE-iNEFF", "id": 41611046, "node_id": "MDQ6VXNlcjQxNjExMDQ2", "avatar_url": "https://avatars.githubusercontent.com/u/41611046?v=4", "gravatar_id": "", "url": "https://api.github.com/users/WiNE-iNEFF", "html_url": "https://github.com/WiNE-iNEFF", "followers_url": "https://api.github.com/users/WiNE-iNEFF/followers", "following_url": "https://api.github.com/users/WiNE-iNEFF/following{/other_user}", "gists_url": "https://api.github.com/users/WiNE-iNEFF/gists{/gist_id}", "starred_url": "https://api.github.com/users/WiNE-iNEFF/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/WiNE-iNEFF/subscriptions", "organizations_url": "https://api.github.com/users/WiNE-iNEFF/orgs", "repos_url": "https://api.github.com/users/WiNE-iNEFF/repos", "events_url": "https://api.github.com/users/WiNE-iNEFF/events{/privacy}", "received_events_url": "https://api.github.com/users/WiNE-iNEFF/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi! Can you please share the directory structure of your image folder and the `load_dataset` call? We decode images with Pillow, and Pillow supports RGBA PNGs, so this shouldn't be a problem.\r\n\r\n", "> Hi! Can you please share the directory structure of your image folder and the `load_dataset` call? We decode images with Pillow, and Pillow supports RGBA PNGs, so this shouldn't be a problem.\n> \n> \n\nI have only 1 folder that I use in the load_dataset function with the name \"IMGDATA\" and all my 9000 images are located in this folder.\n`\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"IMGDATA\")\n`\nAt the same time, using another data set with images consisting of 3 RGB channels, everything works", "Okay, I figured out what was wrong. When uploading my dataset via Google Drive, the images broke and Pillow couldn't open them. As a result, I solved the problem by downloading the ZIP archive" ]
"2023-01-17T18:22:27"
"2023-01-18T20:20:15"
"2023-01-18T20:20:15"
NONE
null
I try to create dataset which contains about 9000 png images 64x64 in size, and they are all 4-channel (RGBA). When trying to use load_dataset() then a dataset is created from only 2 images. What exactly interferes I can not understand.![Screenshot_20230117_212213.jpg](https://user-images.githubusercontent.com/41611046/212980147-9aa68e30-76e9-4b61-a937-c2fdabd56564.jpg)
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5437/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5437/timeline
null
completed
null
null
false
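For reference, the image-folder loading pattern discussed in the comments above looks roughly like this; the directory name follows the reporter's example and is otherwise illustrative:

```python
from datasets import load_dataset

# Load a local folder of images with the generic "imagefolder" builder.
# Images are decoded with Pillow, so RGBA (4-channel) PNGs are supported.
dataset = load_dataset("imagefolder", data_dir="IMGDATA")

sample = dataset["train"][0]["image"]  # a PIL.Image.Image
print(sample.mode, sample.size)        # e.g. "RGBA" and (64, 64)
```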
https://api.github.com/repos/huggingface/datasets/issues/5436
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5436/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5436/comments
https://api.github.com/repos/huggingface/datasets/issues/5436/events
https://github.com/huggingface/datasets/pull/5436
1,536,633,173
PR_kwDODunzps5Hjh4v
5,436
Revert container image pin in CI benchmarks
{ "login": "0x2b3bfa0", "id": 11387611, "node_id": "MDQ6VXNlcjExMzg3NjEx", "avatar_url": "https://avatars.githubusercontent.com/u/11387611?v=4", "gravatar_id": "", "url": "https://api.github.com/users/0x2b3bfa0", "html_url": "https://github.com/0x2b3bfa0", "followers_url": "https://api.github.com/users/0x2b3bfa0/followers", "following_url": "https://api.github.com/users/0x2b3bfa0/following{/other_user}", "gists_url": "https://api.github.com/users/0x2b3bfa0/gists{/gist_id}", "starred_url": "https://api.github.com/users/0x2b3bfa0/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/0x2b3bfa0/subscriptions", "organizations_url": "https://api.github.com/users/0x2b3bfa0/orgs", "repos_url": "https://api.github.com/users/0x2b3bfa0/repos", "events_url": "https://api.github.com/users/0x2b3bfa0/events{/privacy}", "received_events_url": "https://api.github.com/users/0x2b3bfa0/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2023-01-17T15:59:50"
"2023-01-18T09:05:49"
"2023-01-18T06:29:06"
CONTRIBUTOR
null
Closes #5433, reverts #5432, and also: * Uses [ghcr.io container images](https://cml.dev/doc/self-hosted-runners/#docker-images) for extra speed * Updates `actions/checkout` to `v3` (note that `v2` is [deprecated](https://github.blog/changelog/2022-09-22-github-actions-all-actions-will-begin-running-on-node16-instead-of-node12/)) * Follows the new naming convention for environment variables introduced with [iterative/cml#1272](https://github.com/iterative/cml/pull/1272)
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5436/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5436/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5436", "html_url": "https://github.com/huggingface/datasets/pull/5436", "diff_url": "https://github.com/huggingface/datasets/pull/5436.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5436.patch", "merged_at": "2023-01-18T06:29:06" }
true
https://api.github.com/repos/huggingface/datasets/issues/5435
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5435/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5435/comments
https://api.github.com/repos/huggingface/datasets/issues/5435/events
https://github.com/huggingface/datasets/issues/5435
1,536,099,300
I_kwDODunzps5bjwPk
5,435
Wrong statement in "Load a Dataset in Streaming mode" leads to data leakage
{ "login": "HaoyuYang59", "id": 80093591, "node_id": "MDQ6VXNlcjgwMDkzNTkx", "avatar_url": "https://avatars.githubusercontent.com/u/80093591?v=4", "gravatar_id": "", "url": "https://api.github.com/users/HaoyuYang59", "html_url": "https://github.com/HaoyuYang59", "followers_url": "https://api.github.com/users/HaoyuYang59/followers", "following_url": "https://api.github.com/users/HaoyuYang59/following{/other_user}", "gists_url": "https://api.github.com/users/HaoyuYang59/gists{/gist_id}", "starred_url": "https://api.github.com/users/HaoyuYang59/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/HaoyuYang59/subscriptions", "organizations_url": "https://api.github.com/users/HaoyuYang59/orgs", "repos_url": "https://api.github.com/users/HaoyuYang59/repos", "events_url": "https://api.github.com/users/HaoyuYang59/events{/privacy}", "received_events_url": "https://api.github.com/users/HaoyuYang59/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Just for your information, Tensorflow confirmed this issue [here.](https://github.com/tensorflow/tensorflow/issues/59279)", "Thanks for reporting, @HaoyuYang59.\r\n\r\nPlease note that these are different \"dataset\" objects: our docs refer to Hugging Face `datasets.Dataset` and not to TensorFlow `tf.data.Dataset`.\r\n\r\nOur `datasets.Dataset.shuffle` method does not have a `reshuffle_each_iteration` argument. Therefore, I would say the statement in our docs is True because they refer to `datasets.Dataset.shuffle`, `datasets.Dataset.skip` and `datasets.Dataset.take`.\r\n\r\nI think this issue is restricted to TensorFlow dataset, and this would be addressed by them in the issue you opened in their repo: https://github.com/tensorflow/tensorflow/issues/59279", "Also note that you are referring to an outdated documentation page: datasets 1.10.2 version\r\n\r\nCurrent datasets version is 2.8.0 and the corresponding documentation page is: https://huggingface.co/docs/datasets/stream#split-dataset", "Hi @albertvillanova thanks for your reply and your explaination here. \r\n\r\nSorry for the confusion as I'm not actually a user of your repo and I just happen to find the thread by Google (and didn't read carefully).\r\n\r\nGreat to know that and you made everything very clear now.\r\n\r\nThanks for your time and sorry for the consusion.\r\n\r\nWishing you a wonderful time. \r\n\r\nRegards" ]
"2023-01-17T10:04:16"
"2023-01-19T09:56:03"
"2023-01-19T09:56:03"
NONE
null
### Describe the bug In the [Split your dataset with take and skip](https://huggingface.co/docs/datasets/v1.10.2/dataset_streaming.html#split-your-dataset-with-take-and-skip), it states: > Using take (or skip) prevents future calls to shuffle from shuffling the dataset shards order, otherwise the taken examples could come from other shards. In this case it only uses the shuffle buffer. Therefore it is advised to shuffle the dataset before splitting using take or skip. See more details in the [Shuffling the dataset: shuffle](https://huggingface.co/docs/datasets/v1.10.2/dataset_streaming.html#iterable-dataset-shuffling) section.` >> \# You can also create splits from a shuffled dataset >> train_dataset = shuffled_dataset.skip(1000) >> eval_dataset = shuffled_dataset.take(1000) Where the shuffled dataset comes from: `shuffled_dataset = dataset.shuffle(buffer_size=10_000, seed=42)` At least in Tensorflow 2.9/2.10/2.11, [docs](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#shuffle) states the `reshuffle_each_iteration` argument is `True` by default. This means the dataset would be shuffled after each epoch, and as a result **the validation data would leak into training test**. ### Steps to reproduce the bug N/A ### Expected behavior The `reshuffle_each_iteration` argument should be set to `False`. ### Environment info Tensorflow 2.9/2.10/2.11
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5435/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5435/timeline
null
completed
null
null
false
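The split pattern that the quoted documentation describes (shuffle the streaming dataset first, then carve off splits with take and skip) looks roughly like this; the dataset id and split sizes are illustrative:

```python
from datasets import load_dataset

# Stream the dataset instead of downloading it (dataset id is illustrative).
dataset = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True)

# Shuffle first (buffer-based, with the shard order also shuffled), then split.
shuffled_dataset = dataset.shuffle(buffer_size=10_000, seed=42)
eval_dataset = shuffled_dataset.take(1000)   # first 1000 shuffled examples
train_dataset = shuffled_dataset.skip(1000)  # everything after the first 1000
```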
https://api.github.com/repos/huggingface/datasets/issues/5434
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5434/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5434/comments
https://api.github.com/repos/huggingface/datasets/issues/5434/events
https://github.com/huggingface/datasets/issues/5434
1,536,090,042
I_kwDODunzps5bjt-6
5,434
sample_dataset module not found
{ "login": "nickums", "id": 15816213, "node_id": "MDQ6VXNlcjE1ODE2MjEz", "avatar_url": "https://avatars.githubusercontent.com/u/15816213?v=4", "gravatar_id": "", "url": "https://api.github.com/users/nickums", "html_url": "https://github.com/nickums", "followers_url": "https://api.github.com/users/nickums/followers", "following_url": "https://api.github.com/users/nickums/following{/other_user}", "gists_url": "https://api.github.com/users/nickums/gists{/gist_id}", "starred_url": "https://api.github.com/users/nickums/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nickums/subscriptions", "organizations_url": "https://api.github.com/users/nickums/orgs", "repos_url": "https://api.github.com/users/nickums/repos", "events_url": "https://api.github.com/users/nickums/events{/privacy}", "received_events_url": "https://api.github.com/users/nickums/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi! Can you describe what the actual error is?", "working on the setfit example script\r\n\r\n from setfit import SetFitModel, SetFitTrainer, sample_dataset\r\n\r\nImportError: cannot import name 'sample_dataset' from 'setfit' (C:\\Python\\Python38\\lib\\site-packages\\setfit\\__init__.py)\r\n\r\n apart from that, I also had to hack these loads to import thses modules:\r\n from datasets.load import load_dataset \r\n from datasets.arrow_dataset import Dataset\r\n from datasets.dataset_dict import DatasetDict", "Hi! This issue is related to the [SetFit](https://github.com/huggingface/setfit) project, so can you please open it there?" ]
"2023-01-17T09:57:54"
"2023-01-19T13:52:12"
"2023-01-19T07:55:11"
NONE
null
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5434/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5434/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5433
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5433/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5433/comments
https://api.github.com/repos/huggingface/datasets/issues/5433/events
https://github.com/huggingface/datasets/issues/5433
1,536,017,901
I_kwDODunzps5bjcXt
5,433
Support latest Docker image in CI benchmarks
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
null
[]
null
[ "Sorry, it was us:[^1] https://github.com/iterative/cml/pull/1317 & https://github.com/iterative/cml/issues/1319#issuecomment-1385599559; should be fixed with [v0.18.17](https://github.com/iterative/cml/releases/tag/v0.18.17).\r\n\r\n[^1]: More or less, see https://github.com/yargs/yargs/issues/873.", "Opened https://github.com/huggingface/datasets/pull/5436 unpinning again the container image.", "Hi @0x2b3bfa0, thanks a lot for the investigation, the context about the the root cause and for fixing it!!\r\n\r\nWe are reviewing your PR to unpin the container image." ]
"2023-01-17T09:06:08"
"2023-01-18T06:29:08"
"2023-01-18T06:29:08"
MEMBER
null
Once we find out the root cause of: - #5431 we should revert the temporary pin on the Docker image version introduced by: - #5432
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5433/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5433/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5432
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5432/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5432/comments
https://api.github.com/repos/huggingface/datasets/issues/5432/events
https://github.com/huggingface/datasets/pull/5432
1,535,893,019
PR_kwDODunzps5HhEA8
5,432
Fix CI benchmarks by temporarily pinning Docker image version
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2023-01-17T07:15:31"
"2023-01-17T08:58:22"
"2023-01-17T08:51:17"
MEMBER
null
This PR fixes CI benchmarks, by temporarily pinning Docker image version, instead of "latest" tag. It also updates deprecated `cml-send-comment` command and using `cml comment create` instead. Fix #5431.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5432/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5432/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5432", "html_url": "https://github.com/huggingface/datasets/pull/5432", "diff_url": "https://github.com/huggingface/datasets/pull/5432.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5432.patch", "merged_at": "2023-01-17T08:51:17" }
true
https://api.github.com/repos/huggingface/datasets/issues/5431
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5431/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5431/comments
https://api.github.com/repos/huggingface/datasets/issues/5431/events
https://github.com/huggingface/datasets/issues/5431
1,535,862,621
I_kwDODunzps5bi2dd
5,431
CI benchmarks are broken: Unknown arguments: runnerPath, path
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 4296013012, "node_id": "LA_kwDODunzps8AAAABAA_01A", "url": "https://api.github.com/repos/huggingface/datasets/labels/maintenance", "name": "maintenance", "color": "d4c5f9", "default": false, "description": "Maintenance tasks" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[]
"2023-01-17T06:49:57"
"2023-01-18T06:33:24"
"2023-01-17T08:51:18"
MEMBER
null
Our CI benchmarks are broken, raising `Unknown arguments` error: https://github.com/huggingface/datasets/actions/runs/3932397079/jobs/6724905161 ``` Unknown arguments: runnerPath, path ``` Stack trace: ``` 100%|██████████| 500/500 [00:01<00:00, 338.98ba/s] Updating lock file 'dvc.lock' To track the changes with git, run: git add dvc.lock To enable auto staging, run: dvc config core.autostage true Use `dvc push` to send your updates to remote storage. cml send-comment <markdown file> Global Options: --log Logging verbosity [string] [choices: "error", "warn", "info", "debug"] [default: "info"] --driver Git provider where the repository is hosted [string] [choices: "github", "gitlab", "bitbucket"] [default: infer from the environment] --repo Repository URL or slug [string] [default: infer from the environment] --driver-token, --token CI driver personal/project access token (PAT) [string] [default: infer from the environment] --help Show help [boolean] Options: --target Comment type (`commit`, `pr`, `commit/f00bar`, `pr/42`, `issue/1337`),default is automatic (`pr` but fallback to `commit`). [string] --watch Watch for changes and automatically update the comment [boolean] --publish Upload any local images found in the Markdown report [boolean] [default: true] --publish-url Self-hosted image server URL [string] [default: "https://asset.cml.dev/"] --publish-native, --native Uses driver's native capabilities to upload assets instead of CML's storage; not available on GitHub [boolean] --watermark-title Hidden comment marker (used for targeting in subsequent `cml comment update`); "{workflow}" & "{run}" are auto-replaced [string] [default: ""] Unknown arguments: runnerPath, path Error: Process completed with exit code 1. ``` Issue reported to iterative/cml: - iterative/cml#1319
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5431/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5431/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5429
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5429/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5429/comments
https://api.github.com/repos/huggingface/datasets/issues/5429/events
https://github.com/huggingface/datasets/pull/5429
1,535,192,687
PR_kwDODunzps5HeuyT
5,429
Fix CI by temporarily pinning apache-beam < 2.44.0
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2023-01-16T16:20:09"
"2023-01-16T16:51:42"
"2023-01-16T16:49:03"
MEMBER
null
Temporarily pin apache-beam < 2.44.0 Fix #5426.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5429/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5429/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5429", "html_url": "https://github.com/huggingface/datasets/pull/5429", "diff_url": "https://github.com/huggingface/datasets/pull/5429.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5429.patch", "merged_at": "2023-01-16T16:49:03" }
true
https://api.github.com/repos/huggingface/datasets/issues/5427
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5427/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5427/comments
https://api.github.com/repos/huggingface/datasets/issues/5427/events
https://github.com/huggingface/datasets/issues/5427
1,535,162,889
I_kwDODunzps5bgLoJ
5,427
Unable to download dataset id_clickbait
{ "login": "ilos-vigil", "id": 45941585, "node_id": "MDQ6VXNlcjQ1OTQxNTg1", "avatar_url": "https://avatars.githubusercontent.com/u/45941585?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ilos-vigil", "html_url": "https://github.com/ilos-vigil", "followers_url": "https://api.github.com/users/ilos-vigil/followers", "following_url": "https://api.github.com/users/ilos-vigil/following{/other_user}", "gists_url": "https://api.github.com/users/ilos-vigil/gists{/gist_id}", "starred_url": "https://api.github.com/users/ilos-vigil/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ilos-vigil/subscriptions", "organizations_url": "https://api.github.com/users/ilos-vigil/orgs", "repos_url": "https://api.github.com/users/ilos-vigil/repos", "events_url": "https://api.github.com/users/ilos-vigil/events{/privacy}", "received_events_url": "https://api.github.com/users/ilos-vigil/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for reporting, @ilos-vigil.\r\n\r\nWe have transferred this issue to the corresponding dataset on the Hugging Face Hub: https://huggingface.co/datasets/id_clickbait/discussions/1 " ]
"2023-01-16T16:05:36"
"2023-01-18T09:51:28"
"2023-01-18T09:25:19"
NONE
null
### Describe the bug I tried to download dataset `id_clickbait`, but receive this error message. ``` FileNotFoundError: Couldn't find file at https://md-datasets-cache-zipfiles-prod.s3.eu-west-1.amazonaws.com/k42j7x2kpn-1.zip ``` When i open the link using browser, i got this XML data. ```xml <?xml version="1.0" encoding="UTF-8"?> <Error><Code>NoSuchBucket</Code><Message>The specified bucket does not exist</Message><BucketName>md-datasets-cache-zipfiles-prod</BucketName><RequestId>NVRM6VEEQD69SD00</RequestId><HostId>W/SPDxLGvlCGi0OD6d7mSDvfOAUqLAfvs9nTX50BkJrjMny+X9Jnqp/Li2lG9eTUuT4MUkAA2jjTfCrCiUmu7A==</HostId></Error> ``` ### Steps to reproduce the bug Code snippet: ``` from datasets import load_dataset load_dataset('id_clickbait', 'annotated') load_dataset('id_clickbait', 'raw') ``` Link to Kaggle notebook: https://www.kaggle.com/code/ilosvigil/bug-check-on-id-clickbait-dataset ### Expected behavior Successfully download and load `id_newspaper` dataset. ### Environment info - `datasets` version: 2.8.0 - Platform: Linux-5.15.65+-x86_64-with-debian-bullseye-sid - Python version: 3.7.12 - PyArrow version: 8.0.0 - Pandas version: 1.3.5
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5427/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5427/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5426
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5426/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5426/comments
https://api.github.com/repos/huggingface/datasets/issues/5426/events
https://github.com/huggingface/datasets/issues/5426
1,535,158,555
I_kwDODunzps5bgKkb
5,426
CI tests are broken: SchemaInferenceError
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[]
"2023-01-16T16:02:07"
"2023-01-17T07:17:12"
"2023-01-16T16:49:04"
MEMBER
null
CI is broken, raising a `SchemaInferenceError`: see https://github.com/huggingface/datasets/actions/runs/3930901593/jobs/6721492004 ``` FAILED tests/test_beam.py::BeamBuilderTest::test_download_and_prepare_sharded - datasets.arrow_writer.SchemaInferenceError: Please pass `features` or at least one example when writing data ``` Stack trace: ``` ______________ BeamBuilderTest.test_download_and_prepare_sharded _______________ [gw1] linux -- Python 3.7.15 /opt/hostedtoolcache/Python/3.7.15/x64/bin/python self = <tests.test_beam.BeamBuilderTest testMethod=test_download_and_prepare_sharded> @require_beam def test_download_and_prepare_sharded(self): import apache_beam as beam original_write_parquet = beam.io.parquetio.WriteToParquet expected_num_examples = len(get_test_dummy_examples()) with tempfile.TemporaryDirectory() as tmp_cache_dir: builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner") with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock: write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2) > builder.download_and_prepare() tests/test_beam.py:97: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /opt/hostedtoolcache/Python/3.7.15/x64/lib/python3.7/site-packages/datasets/builder.py:864: in download_and_prepare **download_and_prepare_kwargs, /opt/hostedtoolcache/Python/3.7.15/x64/lib/python3.7/site-packages/datasets/builder.py:1976: in _download_and_prepare num_examples, num_bytes = beam_writer.finalize(metrics.query(m_filter)) /opt/hostedtoolcache/Python/3.7.15/x64/lib/python3.7/site-packages/datasets/arrow_writer.py:694: in finalize shard_num_bytes, _ = parquet_to_arrow(source, destination) /opt/hostedtoolcache/Python/3.7.15/x64/lib/python3.7/site-packages/datasets/arrow_writer.py:740: in parquet_to_arrow num_bytes, num_examples = writer.finalize() _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <datasets.arrow_writer.ArrowWriter object at 0x7f6dcbb3e810> close_stream = True def finalize(self, close_stream=True): self.write_rows_on_file() # In case current_examples < writer_batch_size, but user uses finalize() if self._check_duplicates: self.check_duplicate_keys() # Re-intializing to empty list for next batch self.hkey_record = [] self.write_examples_on_file() # If schema is known, infer features even if no examples were written if self.pa_writer is None and self.schema: self._build_writer(self.schema) if self.pa_writer is not None: self.pa_writer.close() self.pa_writer = None if close_stream: self.stream.close() else: if close_stream: self.stream.close() > raise SchemaInferenceError("Please pass `features` or at least one example when writing data") E datasets.arrow_writer.SchemaInferenceError: Please pass `features` or at least one example when writing data /opt/hostedtoolcache/Python/3.7.15/x64/lib/python3.7/site-packages/datasets/arrow_writer.py:593: SchemaInferenceError ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5426/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5426/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5425
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5425/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5425/comments
https://api.github.com/repos/huggingface/datasets/issues/5425/events
https://github.com/huggingface/datasets/issues/5425
1,534,581,850
I_kwDODunzps5bd9xa
5,425
Sort on multiple keys with datasets.Dataset.sort()
{ "login": "rocco-fortuna", "id": 101344863, "node_id": "U_kgDOBgpmXw", "avatar_url": "https://avatars.githubusercontent.com/u/101344863?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rocco-fortuna", "html_url": "https://github.com/rocco-fortuna", "followers_url": "https://api.github.com/users/rocco-fortuna/followers", "following_url": "https://api.github.com/users/rocco-fortuna/following{/other_user}", "gists_url": "https://api.github.com/users/rocco-fortuna/gists{/gist_id}", "starred_url": "https://api.github.com/users/rocco-fortuna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rocco-fortuna/subscriptions", "organizations_url": "https://api.github.com/users/rocco-fortuna/orgs", "repos_url": "https://api.github.com/users/rocco-fortuna/repos", "events_url": "https://api.github.com/users/rocco-fortuna/events{/privacy}", "received_events_url": "https://api.github.com/users/rocco-fortuna/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" }, { "id": 1935892877, "node_id": "MDU6TGFiZWwxOTM1ODkyODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue", "name": "good first issue", "color": "7057ff", "default": true, "description": "Good for newcomers" } ]
closed
false
null
[]
null
[ "Hi! \r\n\r\n`Dataset.sort` calls `df.sort_values` internally, and `df.sort_values` brings all the \"sort\" columns in memory, so sorting on multiple keys could be very expensive. This makes me think that maybe we can replace `df.sort_values` with `pyarrow.compute.sort_indices` - the latter can also sort on multiple keys and currently loads the data into memory; however, there is a plan to eventually implement \"memory-map\" friendly kernels for the Arrow compute ops (using the Acero execution engine). \r\n\r\nSo to address this issue, you should replace `df.sort_values` with `pyarrow.compute.sort_indices` in `Dataset.sort` and adjust the signature of this function (deprecate the `kind` parameter, etc.).\r\n\r\nPS: Feel free to ping us if you need some additional help/pointers", "@mariosasko If I understand the code right, using `pyarrow.compute.sort_indices` would also require changes to the `select` method if it is meant to sort multiple keys. That's because `select` only accepts 1D input for `indices`, not an iterable or similar which would be required for multiple keys unless you want some looping over selects. Doesn't seem that straight-forward but I might be missing something here... ", "@MichlF No, it doesn't require modifying select because sorting on multiple keys also returns a 1D array.\r\n\r\nIt's easier to understand with an example:\r\n```python\r\n>>> import pyarrow as pa\r\n>>> import pyarrow.compute as pc\r\n>>> table = pa.table({\r\n... \"name\": [\"John\", \"Eve\", \"Peter\", \"John\"],\r\n... \"surname\": [\"Johnson\", \"Smith\", \"Smith\", \"Doe\"],\r\n... \"age\": [20, 40, 30, 50],\r\n... })\r\n>>> indices = pc.sort_indices(table, sort_keys=[(\"name\", \"ascending\"), (\"surname\", \"ascending\")])\r\n>>> print(indices)\r\n[\r\n 1,\r\n 3,\r\n 0,\r\n 2\r\n]\r\n```\r\n\r\n", "Thanks for clarifying.\r\nI can prepare a PR to address this issue. This would be my first PR here so I have a few maybe silly questions but:\r\n- What is the preferred input type of `sort_keys` for the sort method? A sequence with name, order tuples like pyarrow's `sort_indices` requires?\r\n- What about backwards compatability: is it supposed to also accept the old way of calling sort() or should both `column` and `kind` be deprecated?\r\n- If `sort_keys` is provided in the same format as for pyarrow's `sort_indices` - i.e. along with order for each column -, `reverse` doesn't make much sense either and should be deprecated as well I assume.", "I think we can have the following signature:\r\n```python\r\ndef sort(\r\n self,\r\n column_names: Union[str, Sequence[str]],\r\n reverse: Union[bool, Sequence[bool]] = False,\r\n kind=\"deprecated\",\r\n null_placement: str = \"last\",\r\n keep_in_memory: bool = False,\r\n load_from_cache_file: bool = True,\r\n indices_cache_file_name: Optional[str] = None,\r\n writer_batch_size: Optional[int] = 1000,\r\n new_fingerprint: Optional[str] = None,\r\n ) -> \"Dataset\":\r\n``` \r\n\r\nSo we should:\r\n* rename`column` to `column_names`. `column` is a positional argument, so it's OK to rename it (not marked as positional-only with \"/\", but still should be fine)\r\n* deprecate `kind`\r\n* keep `reverse` instead of introducing `sort_keys`, but we should allow passing a list of booleans that defines the sort order of each column from `column_names` to it (`reverse = False` would be equal to `[False] * len(column_names)` and `reverse = True` to `[True] * len(column_names)`)", "I am pretty much done with the PR. 
Just one clarification: `Sequence` in `arrow_dataset.py` is a custom dataclass from `features.py` instead of the `type.hinting` class `Sequence` from Python. Do you suggest using that custom `Sequence` class somehow ? Otherwise signature currently reads instead:\r\n```Python\r\n def sort(\r\n self,\r\n column_names: Union[str, List[str]],\r\n reverse: Union[bool, List[bool]] = False,\r\n kind = \"deprecated\",\r\n null_placement: str = \"last\",\r\n keep_in_memory: bool = False,\r\n load_from_cache_file: bool = True,\r\n indices_cache_file_name: Optional[str] = None,\r\n writer_batch_size: Optional[int] = 1000,\r\n new_fingerprint: Optional[str] = None,\r\n )\r\n```\r\n\r\nAlso, to maintain backwards compatibility, I added conditionals for `null_placement`, because pyarrow's `null_placement` only accepts `at_start` and `at_end`, and not `last` and `first`.\r\nIf that is all good, I think I can open the PR.", "I meant `typing.Sequence` (`datasets.Sequence` is a feature type). \r\n\r\nRegarding `null_placement`, I think we can support both `at_start` and `at_end`, and `last` and `first` (for backward compatibility; convert internally to `at_end` and `at_start` respectively).", "> I meant typing.Sequence (datasets.Sequence is a feature type).\r\n\r\nSorry, I actually meant `typing.Sequence` and not `type.hinting`. However, the issue is still that `dataset.Sequence` is imported in `arrow_dataset.py` so I cannot import and use `typing.Sequence` for the `sort`'s signature without overwriting the `dataset.Sequence` import. The latter is used in the `align_labels_with_mapping` method so it's a necessary import for `arrow_dataset.py`. \r\nTo import `typing.Sequence` as something else than `Sequence` to avoid overwriting may only be confusing and doesn't seem good practice!? The other solution is to keep `List` type hinting as in the signature I posted in my previous post but this excludes other Sequence types and may cause problems further down the line.\r\nPlease advise,\r\nThanks for all the clarifications!", "You can avoid the name collision by renaming `typing.Sequence` to `Sequence_` when importing:\r\n```python\r\nfrom typing import Sequence as Sequence_\r\n```", "Resolved via #5502 " ]
"2023-01-16T09:22:26"
"2023-02-24T16:15:11"
"2023-02-24T16:15:11"
NONE
null
### Feature request From discussion on forum: https://discuss.huggingface.co/t/datasets-dataset-sort-does-not-preserve-ordering/29065/1 `sort()` does not preserve ordering, and it does not support sorting on multiple columns, nor a key function. The suggested solution: > ... having something similar to pandas and be able to specify multiple columns for sorting. We’re already using pandas under the hood to do the sorting in datasets. The suggested workaround: > convert your dataset to pandas and use `df.sort_values()` ### Motivation Preserved ordering when sorting is very handy when one needs to sort on multiple columns, A and B, so that e.g. whenever A is equal for two or more rows, B is kept sorted. Having a parameter to do this in 🤗datasets would be cleaner than going through pandas and back, and it wouldn't add much complexity to the library. Alternatives: - the possibility to specify multiple keys to sort by with decreasing priority (suggested solution), - the ability to provide a key function for sorting, so that one can manually specify the sorting criteria. ### Your contribution I'll be happy to contribute by submitting a PR. Will get documented on `CONTRIBUTING.MD`. Would love to get thoughts on this, if anyone has anything to add.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5425/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5425/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5424
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5424/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5424/comments
https://api.github.com/repos/huggingface/datasets/issues/5424/events
https://github.com/huggingface/datasets/issues/5424
1,534,394,756
I_kwDODunzps5bdQGE
5,424
When applying `ReadInstruction` to custom load it's not DatasetDict but list of Dataset?
{ "login": "macabdul9", "id": 25720695, "node_id": "MDQ6VXNlcjI1NzIwNjk1", "avatar_url": "https://avatars.githubusercontent.com/u/25720695?v=4", "gravatar_id": "", "url": "https://api.github.com/users/macabdul9", "html_url": "https://github.com/macabdul9", "followers_url": "https://api.github.com/users/macabdul9/followers", "following_url": "https://api.github.com/users/macabdul9/following{/other_user}", "gists_url": "https://api.github.com/users/macabdul9/gists{/gist_id}", "starred_url": "https://api.github.com/users/macabdul9/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/macabdul9/subscriptions", "organizations_url": "https://api.github.com/users/macabdul9/orgs", "repos_url": "https://api.github.com/users/macabdul9/repos", "events_url": "https://api.github.com/users/macabdul9/events{/privacy}", "received_events_url": "https://api.github.com/users/macabdul9/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi! You can get a `DatasetDict` if you pass a dictionary with read instructions as follows:\r\n```python\r\ninstructions = [\r\n ReadInstruction(split_name=\"train\", from_=0, to=10, unit='%', rounding='closest'),\r\n ReadInstruction(split_name=\"dev\", from_=0, to=10, unit='%', rounding='closest'),\r\n ReadInstruction(split_name=\"test\", from_=0, to=5, unit='%', rounding='closest')\r\n]\r\n\r\ndataset = load_dataset('csv', data_dir=\"data/\", data_files={\"train\":\"train.tsv\", \"dev\":\"dev.tsv\", \"test\":\"test.tsv\"}, delimiter=\"\\t\", split={inst.split_name: inst for inst in instructions})\r\n```\r\n" ]
"2023-01-16T06:54:28"
"2023-02-24T16:19:00"
"2023-02-24T16:19:00"
NONE
null
### Describe the bug I am loading datasets from custom `tsv` files stored locally and applying split instructions for each split. Although the ReadInstruction is being applied correctly and I was expecting it to be `DatasetDict` but instead it is a list of `Dataset`. ### Steps to reproduce the bug Steps to reproduce the behaviour: 1. Import `from datasets import load_dataset, ReadInstruction` 2. Instruction to load the dataset ``` instructions = [ ReadInstruction(split_name="train", from_=0, to=10, unit='%', rounding='closest'), ReadInstruction(split_name="dev", from_=0, to=10, unit='%', rounding='closest'), ReadInstruction(split_name="test", from_=0, to=5, unit='%', rounding='closest') ] ``` 3. Load `dataset = load_dataset('csv', data_dir="data/", data_files={"train":"train.tsv", "dev":"dev.tsv", "test":"test.tsv"}, delimiter="\t", split=instructions)` ### Expected behavior **Current behaviour** ![Screenshot from 2023-01-16 10-45-27](https://user-images.githubusercontent.com/25720695/212614754-306898d8-8c27-4475-9bb8-0321bd939561.png) : **Expected behaviour** ![Screenshot from 2023-01-16 10-45-42](https://user-images.githubusercontent.com/25720695/212614813-0d336bf7-5266-482e-bb96-ef51f64de204.png) ### Environment info ``datasets==2.8.0 `` `Python==3.8.5 ` `Platform - Ubuntu 20.04.4 LTS`
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5424/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5424/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5421
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5421/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5421/comments
https://api.github.com/repos/huggingface/datasets/issues/5421/events
https://github.com/huggingface/datasets/issues/5421
1,532,278,307
I_kwDODunzps5bVLYj
5,421
Support case-insensitive Hub dataset name in load_dataset
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
null
[]
null
[ "Closing as case-insensitivity should be only for URL redirection on the Hub. In the APIs, we will only support the canonical name (https://github.com/huggingface/moon-landing/pull/2399#issuecomment-1382085611)" ]
"2023-01-13T13:07:07"
"2023-01-13T20:12:32"
"2023-01-13T20:12:32"
CONTRIBUTOR
null
### Feature request The dataset name on the Hub is case-insensitive (see https://github.com/huggingface/moon-landing/pull/2399, internal issue), i.e., https://huggingface.co/datasets/GLUE redirects to https://huggingface.co/datasets/glue. Ideally, we could load the glue dataset using the following: ``` from datasets import load_dataset load_dataset('GLUE', 'cola') ``` It breaks because the loading script `GLUE.py` does not exist (`glue.py` should be selected instead). Minor additional comment: in other cases without a loading script, we can load the dataset, but the automatically generated config name depends on the casing: - `load_dataset('severo/danish-wit')` generates the config name `severo--danish-wit-e6fda5b070deb133`, while - `load_dataset('severo/danish-WIT')` generates the config name `severo--danish-WIT-e6fda5b070deb133` ### Motivation To follow the same UX on the Hub and in the datasets library. ### Your contribution ...
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5421/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5421/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5420
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5420/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5420/comments
https://api.github.com/repos/huggingface/datasets/issues/5420/events
https://github.com/huggingface/datasets/pull/5420
1,532,265,742
PR_kwDODunzps5HVAhL
5,420
ci: 🎡 remove two obsolete issue templates
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2023-01-13T12:58:43"
"2023-01-13T13:36:00"
"2023-01-13T13:29:01"
CONTRIBUTOR
null
add-dataset is not needed anymore since the "canonical" datasets are on the Hub. And dataset-viewer is managed within the datasets-server project. See https://github.com/huggingface/datasets/issues/new/choose <img width="1245" alt="Capture d’écran 2023-01-13 à 13 59 58" src="https://user-images.githubusercontent.com/1676121/212325813-2d4c30e2-343e-4aa2-8cce-b2b77f45628e.png">
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5420/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5420/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5420", "html_url": "https://github.com/huggingface/datasets/pull/5420", "diff_url": "https://github.com/huggingface/datasets/pull/5420.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5420.patch", "merged_at": "2023-01-13T13:29:01" }
true
https://api.github.com/repos/huggingface/datasets/issues/5418
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5418/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5418/comments
https://api.github.com/repos/huggingface/datasets/issues/5418/events
https://github.com/huggingface/datasets/issues/5418
1,530,111,184
I_kwDODunzps5bM6TQ
5,418
Add ProgressBar for `to_parquet`
{ "login": "zanussbaum", "id": 33707069, "node_id": "MDQ6VXNlcjMzNzA3MDY5", "avatar_url": "https://avatars.githubusercontent.com/u/33707069?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zanussbaum", "html_url": "https://github.com/zanussbaum", "followers_url": "https://api.github.com/users/zanussbaum/followers", "following_url": "https://api.github.com/users/zanussbaum/following{/other_user}", "gists_url": "https://api.github.com/users/zanussbaum/gists{/gist_id}", "starred_url": "https://api.github.com/users/zanussbaum/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zanussbaum/subscriptions", "organizations_url": "https://api.github.com/users/zanussbaum/orgs", "repos_url": "https://api.github.com/users/zanussbaum/repos", "events_url": "https://api.github.com/users/zanussbaum/events{/privacy}", "received_events_url": "https://api.github.com/users/zanussbaum/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
{ "login": "zanussbaum", "id": 33707069, "node_id": "MDQ6VXNlcjMzNzA3MDY5", "avatar_url": "https://avatars.githubusercontent.com/u/33707069?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zanussbaum", "html_url": "https://github.com/zanussbaum", "followers_url": "https://api.github.com/users/zanussbaum/followers", "following_url": "https://api.github.com/users/zanussbaum/following{/other_user}", "gists_url": "https://api.github.com/users/zanussbaum/gists{/gist_id}", "starred_url": "https://api.github.com/users/zanussbaum/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zanussbaum/subscriptions", "organizations_url": "https://api.github.com/users/zanussbaum/orgs", "repos_url": "https://api.github.com/users/zanussbaum/repos", "events_url": "https://api.github.com/users/zanussbaum/events{/privacy}", "received_events_url": "https://api.github.com/users/zanussbaum/received_events", "type": "User", "site_admin": false }
[ { "login": "zanussbaum", "id": 33707069, "node_id": "MDQ6VXNlcjMzNzA3MDY5", "avatar_url": "https://avatars.githubusercontent.com/u/33707069?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zanussbaum", "html_url": "https://github.com/zanussbaum", "followers_url": "https://api.github.com/users/zanussbaum/followers", "following_url": "https://api.github.com/users/zanussbaum/following{/other_user}", "gists_url": "https://api.github.com/users/zanussbaum/gists{/gist_id}", "starred_url": "https://api.github.com/users/zanussbaum/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zanussbaum/subscriptions", "organizations_url": "https://api.github.com/users/zanussbaum/orgs", "repos_url": "https://api.github.com/users/zanussbaum/repos", "events_url": "https://api.github.com/users/zanussbaum/events{/privacy}", "received_events_url": "https://api.github.com/users/zanussbaum/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for your proposal, @zanussbaum. Yes, I agree that would definitely be a nice feature to have!", "@albertvillanova I’m happy to make a quick PR for the feature! let me know ", "That would be awesome ! You can comment `#self-assign` to assign you to this issue and open a PR :) Will be happy to review", "Closing as this has been merged @lhoestq " ]
"2023-01-12T05:06:20"
"2023-01-24T18:18:24"
"2023-01-24T18:18:24"
CONTRIBUTOR
null
### Feature request Add a progress bar for `Dataset.to_parquet`, similar to how `to_json` works. ### Motivation It's a bit frustrating not to know how long a dataset will take to write to file, or whether it's stuck, without a progress bar. ### Your contribution Sure, I can help if needed
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5418/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5418/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5416
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5416/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5416/comments
https://api.github.com/repos/huggingface/datasets/issues/5416/events
https://github.com/huggingface/datasets/pull/5416
1,526,988,113
PR_kwDODunzps5HDLmR
5,416
Fix RuntimeError: Sharding is ambiguous for this dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2023-01-10T08:43:19"
"2023-01-18T17:12:17"
"2023-01-18T14:09:02"
MEMBER
null
This PR fixes the RuntimeError: Sharding is ambiguous for this dataset. The error for ambiguous sharding will be raised only if num_proc > 1. Fix #5415, fix #5414. Fix https://huggingface.co/datasets/ami/discussions/3.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5416/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5416/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5416", "html_url": "https://github.com/huggingface/datasets/pull/5416", "diff_url": "https://github.com/huggingface/datasets/pull/5416.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5416.patch", "merged_at": "2023-01-18T14:09:02" }
true
https://api.github.com/repos/huggingface/datasets/issues/5415
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5415/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5415/comments
https://api.github.com/repos/huggingface/datasets/issues/5415/events
https://github.com/huggingface/datasets/issues/5415
1,526,904,861
I_kwDODunzps5bArgd
5,415
RuntimeError: Sharding is ambiguous for this dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[]
"2023-01-10T07:36:11"
"2023-01-18T14:09:04"
"2023-01-18T14:09:03"
MEMBER
null
### Describe the bug When loading some datasets, a RuntimeError is raised. For example, for "ami" dataset: https://huggingface.co/datasets/ami/discussions/3 ``` .../huggingface/datasets/src/datasets/builder.py in _prepare_split(self, split_generator, check_duplicate_keys, file_format, num_proc, max_shard_size) 1415 fpath = path_join(self._output_dir, fname) 1416 -> 1417 num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs) 1418 if num_input_shards <= 1 and num_proc is not None: 1419 logger.warning( .../huggingface/datasets/src/datasets/utils/sharding.py in _number_of_shards_in_gen_kwargs(gen_kwargs) 10 lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)} 11 if len(set(lists_lengths.values())) > 1: ---> 12 raise RuntimeError( 13 ( 14 "Sharding is ambiguous for this dataset: " RuntimeError: Sharding is ambiguous for this dataset: we found several data sources lists of different lengths, and we don't know over which list we should parallelize: - key samples_paths has length 6 - key ids has length 7 - key verification_ids has length 6 To fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, and use tuples otherwise. In the end there should only be one single list, or several lists with the same length. ``` This behavior was introduced when implementing multiprocessing by PR: - #5107 ### Steps to reproduce the bug ```python ds = load_dataset("ami", "microphone-single", split="train", revision="2d7620bb7c3f1aab9f329615c3bdb598069d907a") ``` ### Expected behavior No error raised. ### Environment info Since datasets 2.7.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5415/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5415/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5414
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5414/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5414/comments
https://api.github.com/repos/huggingface/datasets/issues/5414/events
https://github.com/huggingface/datasets/issues/5414
1,525,733,818
I_kwDODunzps5a8Nm6
5,414
Sharding error with Multilingual LibriSpeech
{ "login": "Nithin-Holla", "id": 19574344, "node_id": "MDQ6VXNlcjE5NTc0MzQ0", "avatar_url": "https://avatars.githubusercontent.com/u/19574344?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Nithin-Holla", "html_url": "https://github.com/Nithin-Holla", "followers_url": "https://api.github.com/users/Nithin-Holla/followers", "following_url": "https://api.github.com/users/Nithin-Holla/following{/other_user}", "gists_url": "https://api.github.com/users/Nithin-Holla/gists{/gist_id}", "starred_url": "https://api.github.com/users/Nithin-Holla/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Nithin-Holla/subscriptions", "organizations_url": "https://api.github.com/users/Nithin-Holla/orgs", "repos_url": "https://api.github.com/users/Nithin-Holla/repos", "events_url": "https://api.github.com/users/Nithin-Holla/events{/privacy}", "received_events_url": "https://api.github.com/users/Nithin-Holla/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for reporting, @Nithin-Holla.\r\n\r\nThis is a known issue for multiple datasets and we are investigating it:\r\n- See e.g.: https://huggingface.co/datasets/ami/discussions/3", "Main issue:\r\n- #5415", "@albertvillanova Thanks! As a workaround for now, can I use the dataset in streaming mode?", "Yes, @Nithin-Holla, in the meantime you can use this dataset in streaming mode." ]
"2023-01-09T14:45:31"
"2023-01-18T14:09:04"
"2023-01-18T14:09:04"
NONE
null
### Describe the bug Loading the German Multilingual LibriSpeech dataset results in a RuntimeError regarding sharding with the following stacktrace: ``` Downloading and preparing dataset multilingual_librispeech/german to /home/nithin/datadrive/cache/huggingface/datasets/facebook___multilingual_librispeech/german/2.1.0/1904af50f57a5c370c9364cc337699cfe496d4e9edcae6648a96be23086362d0... Downloading data files: 100% 3/3 [00:00<00:00, 107.23it/s] Downloading data files: 100% 1/1 [00:00<00:00, 35.08it/s] Downloading data files: 100% 6/6 [00:00<00:00, 303.36it/s] Downloading data files: 100% 3/3 [00:00<00:00, 130.37it/s] Downloading data files: 100% 1049/1049 [00:00<00:00, 4491.40it/s] Downloading data files: 100% 37/37 [00:00<00:00, 1096.78it/s] Downloading data files: 100% 40/40 [00:00<00:00, 1003.93it/s] Extracting data files: 100% 3/3 [00:11<00:00, 2.62s/it] Generating train split: 469942/0 [34:13<00:00, 273.21 examples/s] Output exceeds the size limit. Open the full output data in a text editor --------------------------------------------------------------------------- RuntimeError Traceback (most recent call last) <ipython-input-14-74fa6d092bdc> in <module> ----> 1 mls = load_dataset(MLS_DATASET, 2 LANGUAGE, 3 cache_dir="~/datadrive/cache/huggingface/datasets", 4 ignore_verifications=True) /anaconda/envs/py38_default/lib/python3.8/site-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, num_proc, **config_kwargs) 1755 1756 # Download and prepare data -> 1757 builder_instance.download_and_prepare( 1758 download_config=download_config, 1759 download_mode=download_mode, /anaconda/envs/py38_default/lib/python3.8/site-packages/datasets/builder.py in download_and_prepare(self, output_dir, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs) 858 if num_proc is not None: 859 prepare_split_kwargs["num_proc"] = num_proc --> 860 self._download_and_prepare( 861 dl_manager=dl_manager, 862 verify_infos=verify_infos, /anaconda/envs/py38_default/lib/python3.8/site-packages/datasets/builder.py in _download_and_prepare(self, dl_manager, verify_infos, **prepare_splits_kwargs) 1609 1610 def _download_and_prepare(self, dl_manager, verify_infos, **prepare_splits_kwargs): ... RuntimeError: Sharding is ambiguous for this dataset: we found several data sources lists of different lengths, and we don't know over which list we should parallelize: - key audio_archives has length 1049 - key local_extracted_archive has length 1049 - key limited_ids_paths has length 1 To fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, and use tuples otherwise. In the end there should only be one single list, or several lists with the same length. ``` ### Steps to reproduce the bug Here is the code to reproduce it: ```python from datasets import load_dataset MLS_DATASET = "facebook/multilingual_librispeech" LANGUAGE = "german" mls = load_dataset(MLS_DATASET, LANGUAGE, cache_dir="~/datadrive/cache/huggingface/datasets", ignore_verifications=True) ``` ### Expected behavior The expected behaviour is that the dataset is successfully loaded. ### Environment info - `datasets` version: 2.8.0 - Platform: Linux-5.4.0-1094-azure-x86_64-with-glibc2.10 - Python version: 3.8.8 - PyArrow version: 10.0.1 - Pandas version: 1.2.4
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5414/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5414/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5413
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5413/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5413/comments
https://api.github.com/repos/huggingface/datasets/issues/5413/events
https://github.com/huggingface/datasets/issues/5413
1,524,591,837
I_kwDODunzps5a32zd
5,413
concatenate_datasets fails when concatenating two datasets with shards > 1 and unequal shard numbers
{ "login": "ZeguanXiao", "id": 38279341, "node_id": "MDQ6VXNlcjM4Mjc5MzQx", "avatar_url": "https://avatars.githubusercontent.com/u/38279341?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ZeguanXiao", "html_url": "https://github.com/ZeguanXiao", "followers_url": "https://api.github.com/users/ZeguanXiao/followers", "following_url": "https://api.github.com/users/ZeguanXiao/following{/other_user}", "gists_url": "https://api.github.com/users/ZeguanXiao/gists{/gist_id}", "starred_url": "https://api.github.com/users/ZeguanXiao/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ZeguanXiao/subscriptions", "organizations_url": "https://api.github.com/users/ZeguanXiao/orgs", "repos_url": "https://api.github.com/users/ZeguanXiao/repos", "events_url": "https://api.github.com/users/ZeguanXiao/events{/privacy}", "received_events_url": "https://api.github.com/users/ZeguanXiao/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi ! Thanks for reporting :)\r\n\r\nI managed to reproduce the hub using\r\n```python\r\n\r\nfrom datasets import concatenate_datasets, Dataset, load_from_disk\r\n\r\nDataset.from_dict({\"a\": range(9)}).save_to_disk(\"tmp/ds1\")\r\nds1 = load_from_disk(\"tmp/ds1\")\r\nds1 = concatenate_datasets([ds1, ds1])\r\n\r\nDataset.from_dict({\"b\": range(6)}).save_to_disk(\"tmp/ds2\")\r\nds2 = load_from_disk(\"tmp/ds2\")\r\nds2 = concatenate_datasets([ds2, ds2, ds2])\r\n\r\nconcatenate_datasets([ds1, ds2], axis=1)\r\n```\r\nand I get\r\n```python\r\nTraceback (most recent call last): \r\n File \"test.py\", line 98, in <module>\r\n dds = concatenate_datasets([ds1, ds2], axis=1)\r\n File \"/Users/.../datasets/combine.py\", line 182, in concatenate_datasets\r\n return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)\r\n File \"/Users/.../datasets/arrow_dataset.py\", line 5499, in _concatenate_map_style_datasets\r\n table = concat_tables([dset._data for dset in dsets], axis=axis)\r\n File \"/Users/.../datasets/table.py\", line 1778, in concat_tables\r\n return ConcatenationTable.from_tables(tables, axis=axis)\r\n File \"/Users/.../datasets/table.py\", line 1483, in from_tables\r\n blocks = _extend_blocks(blocks, table_blocks, axis=axis)\r\n File \"/Users/.../datasets/table.py\", line 1477, in _extend_blocks\r\n result[i].extend(row_blocks)\r\nIndexError: list index out of range\r\n```\r\n\r\nIt appears to happen when the two datasets have a number of shards that is not the same" ]
"2023-01-08T17:01:52"
"2023-01-26T09:27:21"
"2023-01-26T09:27:21"
NONE
null
### Describe the bug When using `concatenate_datasets([dataset1, dataset2], axis = 1)` to concatenate two datasets with shards > 1, it fails: ``` File "/home/xzg/anaconda3/envs/tri-transfer/lib/python3.9/site-packages/datasets/combine.py", line 182, in concatenate_datasets return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis) File "/home/xzg/anaconda3/envs/tri-transfer/lib/python3.9/site-packages/datasets/arrow_dataset.py", line 5499, in _concatenate_map_style_datasets table = concat_tables([dset._data for dset in dsets], axis=axis) File "/home/xzg/anaconda3/envs/tri-transfer/lib/python3.9/site-packages/datasets/table.py", line 1778, in concat_tables return ConcatenationTable.from_tables(tables, axis=axis) File "/home/xzg/anaconda3/envs/tri-transfer/lib/python3.9/site-packages/datasets/table.py", line 1483, in from_tables blocks = _extend_blocks(blocks, table_blocks, axis=axis) File "/home/xzg/anaconda3/envs/tri-transfer/lib/python3.9/site-packages/datasets/table.py", line 1477, in _extend_blocks result[i].extend(row_blocks) IndexError: list index out of range ``` ### Steps to reproduce the bug dataset = concatenate_datasets([dataset1, dataset2], axis = 1) ### Expected behavior The datasets are correctly concatenated. ### Environment info datasets==2.8.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5413/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5413/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5412
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5412/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5412/comments
https://api.github.com/repos/huggingface/datasets/issues/5412/events
https://github.com/huggingface/datasets/issues/5412
1,524,250,269
I_kwDODunzps5a2jad
5,412
load_dataset() cannot find dataset_info.json with multiple training runs in parallel
{ "login": "destigres", "id": 7139344, "node_id": "MDQ6VXNlcjcxMzkzNDQ=", "avatar_url": "https://avatars.githubusercontent.com/u/7139344?v=4", "gravatar_id": "", "url": "https://api.github.com/users/destigres", "html_url": "https://github.com/destigres", "followers_url": "https://api.github.com/users/destigres/followers", "following_url": "https://api.github.com/users/destigres/following{/other_user}", "gists_url": "https://api.github.com/users/destigres/gists{/gist_id}", "starred_url": "https://api.github.com/users/destigres/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/destigres/subscriptions", "organizations_url": "https://api.github.com/users/destigres/orgs", "repos_url": "https://api.github.com/users/destigres/repos", "events_url": "https://api.github.com/users/destigres/events{/privacy}", "received_events_url": "https://api.github.com/users/destigres/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi ! It fails because the dataset is already being prepared by your first run. I'd encourage you to prepare your dataset before using it for multiple trainings.\r\n\r\nYou can also specify another cache directory by passing `cache_dir=` to `load_dataset()`.", "Thank you! What do you mean by prepare it beforehand? I am unclear how to conduct dataset preparation outside of using the `load_dataset` function.", "You can have a separate script that does load_dataset + map + save_to_disk to save your prepared dataset somewhere. Then in your training script you can reload the dataset with load_from_disk", "Thank you! I believe I was running additional map steps after loading, resulting in the cache conflict. " ]
"2023-01-08T00:44:32"
"2023-01-19T20:28:43"
"2023-01-19T20:28:43"
NONE
null
### Describe the bug I have a custom local dataset in JSON form. I am trying to do multiple training runs in parallel. The first training run runs with no issue. However, when I start another run on another GPU, the following code throws this error. If there is a workaround to ignore the cache I think that would solve my problem too. I am using datasets version 2.8.0. ### Steps to reproduce the bug 1. Start training run of GPU 0 loading dataset from ``` load_dataset( "json", data_files=tr_dataset_path, split=f"train", download_mode="force_redownload", ) ``` 2. While GPU 0 is training, start an identical run on GPU 1. GPU 1 will produce the following error: ``` Traceback (most recent call last): File "/local-scratch1/data/mt/code/qq/train.py", line 198, in <module> main() File "/home/username/.local/lib/python3.8/site-packages/click/core.py", line 1130, in __call__ return self.main(*args, **kwargs) File "/home/username/.local/lib/python3.8/site-packages/click/core.py", line 1055, in main rv = self.invoke(ctx) File "/home/username/.local/lib/python3.8/site-packages/click/core.py", line 1404, in invoke return ctx.invoke(self.callback, **ctx.params) File "/home/username/.local/lib/python3.8/site-packages/click/core.py", line 760, in invoke return __callback(*args, **kwargs) File "/local-scratch1/data/mt/code/qq/train.py", line 113, in main load_dataset( File "/home/username/miniconda3/envs/qq3/lib/python3.8/site-packages/datasets/load.py", line 1734, in load_dataset builder_instance = load_dataset_builder( File "/home/username/miniconda3/envs/qq3/lib/python3.8/site-packages/datasets/load.py", line 1518, in load_dataset_builder builder_instance: DatasetBuilder = builder_cls( File "/home/username/miniconda3/envs/qq3/lib/python3.8/site-packages/datasets/builder.py", line 366, in __init__ self.info = DatasetInfo.from_directory(self._cache_dir) File "/home/username/miniconda3/envs/qq3/lib/python3.8/site-packages/datasets/info.py", line 313, in from_directory with fs.open(path_join(dataset_info_dir, config.DATASET_INFO_FILENAME), "r", encoding="utf-8") as f: File "/home/username/miniconda3/envs/qq3/lib/python3.8/site-packages/fsspec/spec.py", line 1094, in open self.open( File "/home/username/miniconda3/envs/qq3/lib/python3.8/site-packages/fsspec/spec.py", line 1106, in open f = self._open( File "/home/username/miniconda3/envs/qq3/lib/python3.8/site-packages/fsspec/implementations/local.py", line 175, in _open return LocalFileOpener(path, mode, fs=self, **kwargs) File "/home/username/miniconda3/envs/qq3/lib/python3.8/site-packages/fsspec/implementations/local.py", line 273, in __init__ self._open() File "/home/username/miniconda3/envs/qq3/lib/python3.8/site-packages/fsspec/implementations/local.py", line 278, in _open self.f = open(self.path, mode=self.mode) FileNotFoundError: [Errno 2] No such file or directory: '/home/username/.cache/huggingface/datasets/json/default-43d06a4aedb25e6d/0.0.0/0f7e3662623656454fcd2b650f34e886a7db4b9104504885bd462096cc7a9f51/dataset_info.json' ``` ### Expected behavior Expected behavior: 2nd GPU training run should run the same as 1st GPU training run. ### Environment info Copy-and-paste the text below in your GitHub issue. - `datasets` version: 2.8.0 - Platform: Linux-5.4.0-120-generic-x86_64-with-glibc2.10 - Python version: 3.8.15 - PyArrow version: 9.0.0 - Pandas version: 1.5.2
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5412/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5412/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5411
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5411/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5411/comments
https://api.github.com/repos/huggingface/datasets/issues/5411/events
https://github.com/huggingface/datasets/pull/5411
1,523,297,786
PR_kwDODunzps5G23-T
5,411
Update docs of S3 filesystem with async aiobotocore
{ "login": "maheshpec", "id": 5677912, "node_id": "MDQ6VXNlcjU2Nzc5MTI=", "avatar_url": "https://avatars.githubusercontent.com/u/5677912?v=4", "gravatar_id": "", "url": "https://api.github.com/users/maheshpec", "html_url": "https://github.com/maheshpec", "followers_url": "https://api.github.com/users/maheshpec/followers", "following_url": "https://api.github.com/users/maheshpec/following{/other_user}", "gists_url": "https://api.github.com/users/maheshpec/gists{/gist_id}", "starred_url": "https://api.github.com/users/maheshpec/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/maheshpec/subscriptions", "organizations_url": "https://api.github.com/users/maheshpec/orgs", "repos_url": "https://api.github.com/users/maheshpec/repos", "events_url": "https://api.github.com/users/maheshpec/events{/privacy}", "received_events_url": "https://api.github.com/users/maheshpec/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2023-01-06T23:19:17"
"2023-01-18T11:18:59"
"2023-01-18T11:12:04"
CONTRIBUTOR
null
[s3fs has migrated to all async calls](https://github.com/fsspec/s3fs/commit/0de2c6fb3d87c08ea694de96dca0d0834034f8bf). Updating the documentation to use `AioSession` when using s3fs for the download manager, as well as when working with datasets.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5411/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5411/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5411", "html_url": "https://github.com/huggingface/datasets/pull/5411", "diff_url": "https://github.com/huggingface/datasets/pull/5411.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5411.patch", "merged_at": "2023-01-18T11:12:04" }
true
https://api.github.com/repos/huggingface/datasets/issues/5410
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5410/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5410/comments
https://api.github.com/repos/huggingface/datasets/issues/5410/events
https://github.com/huggingface/datasets/pull/5410
1,521,168,032
PR_kwDODunzps5GvnJH
5,410
Map-style Dataset to IterableDataset
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2023-01-05T18:12:17"
"2023-02-01T18:11:45"
"2023-02-01T16:36:01"
MEMBER
null
Added `ds.to_iterable()` to get an iterable dataset from a map-style arrow dataset. It also has a `num_shards` argument to split the dataset before converting to an iterable dataset. Sharding is important to enable efficient shuffling and parallel loading of iterable datasets. TODO: - [x] tests - [x] docs Fix https://github.com/huggingface/datasets/issues/5265
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5410/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5410/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5410", "html_url": "https://github.com/huggingface/datasets/pull/5410", "diff_url": "https://github.com/huggingface/datasets/pull/5410.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5410.patch", "merged_at": "2023-02-01T16:36:01" }
true
https://api.github.com/repos/huggingface/datasets/issues/5409
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5409/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5409/comments
https://api.github.com/repos/huggingface/datasets/issues/5409/events
https://github.com/huggingface/datasets/pull/5409
1,520,374,219
PR_kwDODunzps5Gs3nL
5,409
Fix deprecation warning when use_auth_token passed to download_and_prepare
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2023-01-05T09:10:58"
"2023-01-06T11:06:16"
"2023-01-06T10:59:13"
MEMBER
null
The `DatasetBuilder.download_and_prepare` argument `use_auth_token` was deprecated in: - #5302 However, `use_auth_token` is still passed to `download_and_prepare` in our built-in `io` readers (csv, json, parquet,...). This PR fixes it, so that no deprecation warning is raised. Fix #5407.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5409/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5409/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5409", "html_url": "https://github.com/huggingface/datasets/pull/5409", "diff_url": "https://github.com/huggingface/datasets/pull/5409.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5409.patch", "merged_at": "2023-01-06T10:59:13" }
true
https://api.github.com/repos/huggingface/datasets/issues/5408
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5408/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5408/comments
https://api.github.com/repos/huggingface/datasets/issues/5408/events
https://github.com/huggingface/datasets/issues/5408
1,519,890,752
I_kwDODunzps5al7FA
5,408
dataset map function could not be hashed properly
{ "login": "Tungway1990", "id": 68179274, "node_id": "MDQ6VXNlcjY4MTc5Mjc0", "avatar_url": "https://avatars.githubusercontent.com/u/68179274?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Tungway1990", "html_url": "https://github.com/Tungway1990", "followers_url": "https://api.github.com/users/Tungway1990/followers", "following_url": "https://api.github.com/users/Tungway1990/following{/other_user}", "gists_url": "https://api.github.com/users/Tungway1990/gists{/gist_id}", "starred_url": "https://api.github.com/users/Tungway1990/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Tungway1990/subscriptions", "organizations_url": "https://api.github.com/users/Tungway1990/orgs", "repos_url": "https://api.github.com/users/Tungway1990/repos", "events_url": "https://api.github.com/users/Tungway1990/events{/privacy}", "received_events_url": "https://api.github.com/users/Tungway1990/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi ! On macos I tried with\r\n- py 3.9.11\r\n- datasets 2.8.0\r\n- transformers 4.25.1\r\n- dill 0.3.4\r\n\r\nand I was able to hash `prepare_dataset` correctly:\r\n```python\r\nfrom datasets.fingerprint import Hasher\r\nHasher.hash(prepare_dataset)\r\n```\r\n\r\nWhat version of transformers do you have ? Can you try to call `Hasher.hash` on the the tokenizer and the feature extractor to see which one can't be hashed ?", "Thanks for your prompt reply.\r\n\r\nI update datasets version to 2.8.0 and the warning is gong." ]
"2023-01-05T01:59:59"
"2023-01-06T13:22:19"
"2023-01-06T13:22:18"
NONE
null
### Describe the bug I follow the [blog post](https://huggingface.co/blog/fine-tune-whisper#building-a-demo) to finetune a Cantonese transcribe model. When using map function to prepare dataset, following warning pop out: `common_voice = common_voice.map(prepare_dataset, remove_columns=common_voice.column_names["train"], num_proc=1)` > Parameter 'function'=<function prepare_dataset at 0x000001D1D9D79A60> of the transform datasets.arrow_dataset.Dataset._map_single couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only showed once. Subsequent hashing failures won't be showed. I read https://github.com/huggingface/datasets/issues/4521 and https://github.com/huggingface/datasets/issues/3178 but cannot solve the issue. ### Steps to reproduce the bug ```python from datasets import load_dataset, DatasetDict common_voice = DatasetDict() common_voice["train"] = load_dataset("mozilla-foundation/common_voice_11_0", "zh-HK", split="train+validation") common_voice["test"] = load_dataset("mozilla-foundation/common_voice_11_0", "zh-HK", split="test") common_voice = common_voice.remove_columns(["accent", "age", "client_id", "down_votes", "gender", "locale", "path", "segment", "up_votes"]) from transformers import WhisperFeatureExtractor, WhisperTokenizer, WhisperProcessor feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-small") tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-small", language="chinese", task="transcribe") processor = WhisperProcessor.from_pretrained("openai/whisper-small", language="chinese", task="transcribe") from datasets import Audio common_voice = common_voice.cast_column("audio", Audio(sampling_rate=16000)) def prepare_dataset(batch): # load and resample audio data from 48 to 16kHz audio = batch["audio"] # compute log-Mel input features from input audio array batch["input_features"] = feature_extractor(audio["array"], sampling_rate=audio["sampling_rate"]).input_features[0] # encode target text to label ids batch["labels"] = tokenizer(batch["sentence"]).input_ids return batch common_voice = common_voice.map(prepare_dataset, remove_columns=common_voice.column_names["train"], num_proc=1) ``` ### Expected behavior Should be no warning shown. ### Environment info - `datasets` version: 2.7.0 - Platform: Windows-10-10.0.19045-SP0 - Python version: 3.9.12 - PyArrow version: 8.0.0 - Pandas version: 1.3.5 - dill version: 0.3.4 - multiprocess version: 0.70.12.2
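Following the suggestion in the discussion above, a short sketch for locating the unhashable object is shown below; it assumes the `feature_extractor`, `tokenizer`, and `prepare_dataset` objects defined in the snippet above.

```python
# Hash each object that `prepare_dataset` depends on to find the one
# that cannot be pickled/hashed for fingerprinting.
from datasets.fingerprint import Hasher

for name, obj in [
    ("feature_extractor", feature_extractor),
    ("tokenizer", tokenizer),
    ("prepare_dataset", prepare_dataset),
]:
    try:
        print(name, Hasher.hash(obj))
    except Exception as err:
        print(name, "could not be hashed:", err)
```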
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5408/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5408/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5407
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5407/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5407/comments
https://api.github.com/repos/huggingface/datasets/issues/5407/events
https://github.com/huggingface/datasets/issues/5407
1,519,797,345
I_kwDODunzps5alkRh
5,407
Datasets.from_sql() generates deprecation warning
{ "login": "msummerfield", "id": 21002157, "node_id": "MDQ6VXNlcjIxMDAyMTU3", "avatar_url": "https://avatars.githubusercontent.com/u/21002157?v=4", "gravatar_id": "", "url": "https://api.github.com/users/msummerfield", "html_url": "https://github.com/msummerfield", "followers_url": "https://api.github.com/users/msummerfield/followers", "following_url": "https://api.github.com/users/msummerfield/following{/other_user}", "gists_url": "https://api.github.com/users/msummerfield/gists{/gist_id}", "starred_url": "https://api.github.com/users/msummerfield/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/msummerfield/subscriptions", "organizations_url": "https://api.github.com/users/msummerfield/orgs", "repos_url": "https://api.github.com/users/msummerfield/repos", "events_url": "https://api.github.com/users/msummerfield/events{/privacy}", "received_events_url": "https://api.github.com/users/msummerfield/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for reporting @msummerfield. We are fixing it." ]
"2023-01-05T00:43:17"
"2023-01-06T10:59:14"
"2023-01-06T10:59:14"
NONE
null
### Describe the bug Calling `Datasets.from_sql()` generates a warning: `.../site-packages/datasets/builder.py:712: FutureWarning: 'use_auth_token' was deprecated in version 2.7.1 and will be removed in 3.0.0. Pass 'use_auth_token' to the initializer/'load_dataset_builder' instead.` ### Steps to reproduce the bug Any valid call to `Datasets.from_sql()` will produce the deprecation warning. ### Expected behavior No warning. The fix should be simply to remove the parameter `use_auth_token` from the call to `builder.download_and_prepare()` at line 43 of `io/sql.py` (it is set to `None` anyway, and is not needed). ### Environment info - `datasets` version: 2.8.0 - Platform: Linux-4.15.0-169-generic-x86_64-with-glibc2.27 - Python version: 3.9.15 - PyArrow version: 10.0.1 - Pandas version: 1.5.2
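A minimal sketch of a call that triggers the warning is shown below; the SQL query, table, and SQLite file are placeholders.

```python
# Any valid from_sql call reproduced the FutureWarning; names are placeholders.
from datasets import Dataset

ds = Dataset.from_sql("SELECT * FROM my_table", "sqlite:///my_database.db")
```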
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5407/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5407/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5403
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5403/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5403/comments
https://api.github.com/repos/huggingface/datasets/issues/5403/events
https://github.com/huggingface/datasets/pull/5403
1,517,466,492
PR_kwDODunzps5Gi3d9
5,403
Replace one-letter import in docs
{ "login": "MKhalusova", "id": 1065417, "node_id": "MDQ6VXNlcjEwNjU0MTc=", "avatar_url": "https://avatars.githubusercontent.com/u/1065417?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MKhalusova", "html_url": "https://github.com/MKhalusova", "followers_url": "https://api.github.com/users/MKhalusova/followers", "following_url": "https://api.github.com/users/MKhalusova/following{/other_user}", "gists_url": "https://api.github.com/users/MKhalusova/gists{/gist_id}", "starred_url": "https://api.github.com/users/MKhalusova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MKhalusova/subscriptions", "organizations_url": "https://api.github.com/users/MKhalusova/orgs", "repos_url": "https://api.github.com/users/MKhalusova/repos", "events_url": "https://api.github.com/users/MKhalusova/events{/privacy}", "received_events_url": "https://api.github.com/users/MKhalusova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2023-01-03T14:26:32"
"2023-01-03T15:06:18"
"2023-01-03T14:59:01"
CONTRIBUTOR
null
This PR updates a code example for consistency across the docs based on [feedback from this comment](https://github.com/huggingface/transformers/pull/20925/files/9fda31634d203a47d3212e4e8d43d3267faf9808#r1058769500): "In terms of style we usually stay away from one-letter imports like this (even if the community uses them) as they are not always known by beginners and one letter is very undescriptive. Here it wouldn't change anything to use albumentations instead of A."
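For illustration, the style change described above looks roughly like this; the `albumentations` transforms are example choices, not the exact doc snippet.

```python
# One-letter alias (the style the docs move away from)
import albumentations as A
transform = A.Compose([A.HorizontalFlip(p=0.5)])

# Spelled-out import (the style the docs now use)
import albumentations
transform = albumentations.Compose([albumentations.HorizontalFlip(p=0.5)])
```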
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5403/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5403/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5403", "html_url": "https://github.com/huggingface/datasets/pull/5403", "diff_url": "https://github.com/huggingface/datasets/pull/5403.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5403.patch", "merged_at": "2023-01-03T14:59:01" }
true
https://api.github.com/repos/huggingface/datasets/issues/5400
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5400/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5400/comments
https://api.github.com/repos/huggingface/datasets/issues/5400/events
https://github.com/huggingface/datasets/pull/5400
1,517,032,972
PR_kwDODunzps5GhaGI
5,400
Support streaming datasets with os.path.exists and Path.exists
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2023-01-03T07:42:37"
"2023-01-06T10:42:44"
"2023-01-06T10:35:44"
MEMBER
null
Support streaming datasets with `os.path.exists` and `pathlib.Path.exists`.
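A hypothetical loading-script fragment that this change enables in streaming mode is sketched below; the file names and fields are illustrative only.

```python
# Hypothetical dataset-script fragment: in streaming mode, os.path.exists
# (and pathlib.Path.exists) now also resolve the URLs returned by the
# streaming download manager.
import os

def _generate_examples(data_dir):
    labels_path = os.path.join(data_dir, "labels.csv")
    if os.path.exists(labels_path):  # works for local paths and streamed URLs
        with open(labels_path, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                yield idx, {"text": line.strip()}
```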
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5400/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5400/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5400", "html_url": "https://github.com/huggingface/datasets/pull/5400", "diff_url": "https://github.com/huggingface/datasets/pull/5400.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5400.patch", "merged_at": "2023-01-06T10:35:44" }
true
https://api.github.com/repos/huggingface/datasets/issues/5399
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5399/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5399/comments
https://api.github.com/repos/huggingface/datasets/issues/5399/events
https://github.com/huggingface/datasets/issues/5399
1,515,548,427
I_kwDODunzps5aVW8L
5,399
Got disconnected from remote data host. Retrying in 5sec [2/20]
{ "login": "alhuri", "id": 46427957, "node_id": "MDQ6VXNlcjQ2NDI3OTU3", "avatar_url": "https://avatars.githubusercontent.com/u/46427957?v=4", "gravatar_id": "", "url": "https://api.github.com/users/alhuri", "html_url": "https://github.com/alhuri", "followers_url": "https://api.github.com/users/alhuri/followers", "following_url": "https://api.github.com/users/alhuri/following{/other_user}", "gists_url": "https://api.github.com/users/alhuri/gists{/gist_id}", "starred_url": "https://api.github.com/users/alhuri/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alhuri/subscriptions", "organizations_url": "https://api.github.com/users/alhuri/orgs", "repos_url": "https://api.github.com/users/alhuri/repos", "events_url": "https://api.github.com/users/alhuri/events{/privacy}", "received_events_url": "https://api.github.com/users/alhuri/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2023-01-01T13:00:11"
"2023-01-02T07:21:52"
"2023-01-02T07:21:52"
NONE
null
### Describe the bug While trying to upload my image dataset of a CSV file type to huggingface by running the below code. The dataset consists of a little over 100k of image-caption pairs ### Steps to reproduce the bug ``` df = pd.read_csv('x.csv', encoding='utf-8-sig') features = Features({ 'link': Image(decode=True), 'caption': Value(dtype='string'), }) #make sure u r logged in to HF ds = Dataset.from_pandas(df, features=features) ds.features ds.push_to_hub("x/x") ``` I got the below error and It always stops at the same progress ``` 100%|██████████| 4/4 [23:53<00:00, 358.48s/ba] 100%|██████████| 4/4 [24:37<00:00, 369.47s/ba]%|▍ | 1/22 [00:06<02:09, 6.16s/it] 100%|██████████| 4/4 [25:00<00:00, 375.15s/ba]%|▉ | 2/22 [25:54<2:36:15, 468.80s/it] 100%|██████████| 4/4 [24:53<00:00, 373.29s/ba]%|█▎ | 3/22 [51:01<4:07:07, 780.39s/it] 100%|██████████| 4/4 [24:01<00:00, 360.34s/ba]%|█▊ | 4/22 [1:17:00<5:04:07, 1013.74s/it] 100%|██████████| 4/4 [23:59<00:00, 359.91s/ba]%|██▎ | 5/22 [1:41:07<5:24:06, 1143.90s/it] 100%|██████████| 4/4 [24:16<00:00, 364.06s/ba]%|██▋ | 6/22 [2:05:14<5:29:15, 1234.74s/it] 100%|██████████| 4/4 [25:24<00:00, 381.10s/ba]%|███▏ | 7/22 [2:29:38<5:25:52, 1303.52s/it] 100%|██████████| 4/4 [25:24<00:00, 381.24s/ba]%|███▋ | 8/22 [2:56:02<5:23:46, 1387.58s/it] 100%|██████████| 4/4 [25:08<00:00, 377.23s/ba]%|████ | 9/22 [3:22:24<5:13:17, 1445.97s/it] 100%|██████████| 4/4 [24:11<00:00, 362.87s/ba]%|████▌ | 10/22 [3:48:24<4:56:02, 1480.19s/it] 100%|██████████| 4/4 [24:44<00:00, 371.11s/ba]%|█████ | 11/22 [4:12:42<4:30:10, 1473.66s/it] 100%|██████████| 4/4 [24:35<00:00, 368.81s/ba]%|█████▍ | 12/22 [4:37:34<4:06:29, 1478.98s/it] 100%|██████████| 4/4 [24:02<00:00, 360.67s/ba]%|█████▉ | 13/22 [5:03:24<3:45:04, 1500.45s/it] 100%|██████████| 4/4 [24:07<00:00, 361.78s/ba]%|██████▎ | 14/22 [5:27:33<3:17:59, 1484.97s/it] 100%|██████████| 4/4 [23:39<00:00, 354.85s/ba]%|██████▊ | 15/22 [5:51:48<2:52:10, 1475.82s/it] Pushing dataset shards to the dataset hub: 73%|███████▎ | 16/22 [6:16:58<2:28:37, 1486.31s/it]Got disconnected from remote data host. Retrying in 5sec [1/20] Got disconnected from remote data host. Retrying in 5sec [2/20] Got disconnected from remote data host. Retrying in 5sec [3/20] Got disconnected from remote data host. Retrying in 5sec [4/20] Got disconnected from remote data host. Retrying in 5sec [5/20] Got disconnected from remote data host. Retrying in 5sec [6/20] Got disconnected from remote data host. Retrying in 5sec [7/20] Got disconnected from remote data host. Retrying in 5sec [8/20] Got disconnected from remote data host. Retrying in 5sec [9/20] ... Got disconnected from remote data host. Retrying in 5sec [19/20] Got disconnected from remote data host. Retrying in 5sec [20/20] 75%|███████▌ | 3/4 [24:47<08:15, 495.86s/ba] Pushing dataset shards to the dataset hub: 73%|███████▎ | 16/22 [6:41:46<2:30:39, 1506.65s/it] Output exceeds the size limit. Open the full output data in a text editor --------------------------------------------------------------------------- ConnectionError Traceback (most recent call last) <ipython-input-1-dbf8530779e9> in <module> 16 ds.features ``` ### Expected behavior I was trying to upload an image dataset and expected it to be fully uploaded ### Environment info - `datasets` version: 2.8.0 - Platform: Windows-10-10.0.19041-SP0 - Python version: 3.7.9 - PyArrow version: 10.0.1 - Pandas version: 1.3.5
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5399/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5399/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5398
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5398/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5398/comments
https://api.github.com/repos/huggingface/datasets/issues/5398/events
https://github.com/huggingface/datasets/issues/5398
1,514,425,231
I_kwDODunzps5aREuP
5,398
Unpin pydantic
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-30T10:37:31"
"2022-12-30T10:43:41"
"2022-12-30T10:43:41"
MEMBER
null
Once `pydantic` fixes their issue in their 1.10.3 version, unpin it. See issue: - #5394 See temporary fix: - #5395
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5398/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5398/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5397
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5397/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5397/comments
https://api.github.com/repos/huggingface/datasets/issues/5397/events
https://github.com/huggingface/datasets/pull/5397
1,514,412,246
PR_kwDODunzps5GYirs
5,397
Unpin pydantic test dependency
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-30T10:22:09"
"2022-12-30T10:53:11"
"2022-12-30T10:43:40"
MEMBER
null
Once pydantic-1.10.3 has been yanked, we can unpin it: https://pypi.org/project/pydantic/1.10.3/ See reply by pydantic team https://github.com/pydantic/pydantic/issues/4885#issuecomment-1367819807 ``` v1.10.3 has been yanked. ``` in response to spacy request: https://github.com/pydantic/pydantic/issues/4885#issuecomment-1367810049 ``` On behalf of spacy-related packages: would it be possible for you to temporarily yank v1.10.3? To address this and be compatible with v1.10.4, we'd have to release new versions of a whole series of packages and nearly everyone (including me) is currently on vacation. Even if v1.10.4 is released with a fix, pip would still back off to v1.10.3 for spacy, etc. because of its current pins for typing_extensions. If it could instead back off to v1.10.2, we'd have a bit more breathing room to make the updates on our end. ``` Close #5398.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5397/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5397/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5397", "html_url": "https://github.com/huggingface/datasets/pull/5397", "diff_url": "https://github.com/huggingface/datasets/pull/5397.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5397.patch", "merged_at": "2022-12-30T10:43:40" }
true
https://api.github.com/repos/huggingface/datasets/issues/5396
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5396/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5396/comments
https://api.github.com/repos/huggingface/datasets/issues/5396/events
https://github.com/huggingface/datasets/pull/5396
1,514,002,934
PR_kwDODunzps5GXMhp
5,396
Fix checksum verification
{ "login": "daskol", "id": 9336514, "node_id": "MDQ6VXNlcjkzMzY1MTQ=", "avatar_url": "https://avatars.githubusercontent.com/u/9336514?v=4", "gravatar_id": "", "url": "https://api.github.com/users/daskol", "html_url": "https://github.com/daskol", "followers_url": "https://api.github.com/users/daskol/followers", "following_url": "https://api.github.com/users/daskol/following{/other_user}", "gists_url": "https://api.github.com/users/daskol/gists{/gist_id}", "starred_url": "https://api.github.com/users/daskol/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/daskol/subscriptions", "organizations_url": "https://api.github.com/users/daskol/orgs", "repos_url": "https://api.github.com/users/daskol/repos", "events_url": "https://api.github.com/users/daskol/events{/privacy}", "received_events_url": "https://api.github.com/users/daskol/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-29T19:45:17"
"2023-02-13T11:11:22"
"2023-02-13T11:11:22"
CONTRIBUTOR
null
The expected checksum was being verified against the whole checksum metadata dict rather than against the checksum value itself.
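An illustrative sketch of the class of bug described is shown below; the variable names and dict layout are assumptions, not the actual `datasets` internals.

```python
# Illustration only: comparing a checksum string with a metadata dict
# can never match, so the comparison must target the "checksum" value.
expected = {"num_bytes": 123, "checksum": "abc123"}  # expected metadata for one URL
recorded_checksum = "abc123"                         # checksum computed after download

assert recorded_checksum != expected                 # buggy comparison: str vs dict
assert recorded_checksum == expected["checksum"]     # fixed comparison
```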
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5396/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5396/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5396", "html_url": "https://github.com/huggingface/datasets/pull/5396", "diff_url": "https://github.com/huggingface/datasets/pull/5396.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5396.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/5395
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5395/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5395/comments
https://api.github.com/repos/huggingface/datasets/issues/5395/events
https://github.com/huggingface/datasets/pull/5395
1,513,997,335
PR_kwDODunzps5GXLUl
5,395
Temporarily pin pydantic test dependency
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-29T19:34:19"
"2022-12-30T06:36:57"
"2022-12-29T21:00:26"
MEMBER
null
Temporarily pin `pydantic` until a permanent solution is found. Fix #5394.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5395/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5395/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5395", "html_url": "https://github.com/huggingface/datasets/pull/5395", "diff_url": "https://github.com/huggingface/datasets/pull/5395.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5395.patch", "merged_at": "2022-12-29T21:00:26" }
true
https://api.github.com/repos/huggingface/datasets/issues/5394
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5394/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5394/comments
https://api.github.com/repos/huggingface/datasets/issues/5394/events
https://github.com/huggingface/datasets/issues/5394
1,513,976,229
I_kwDODunzps5aPXGl
5,394
CI error: TypeError: dataclass_transform() got an unexpected keyword argument 'field_specifiers'
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "I still getting the same error :\r\n\r\n`python -m spacy download fr_core_news_lg\r\n`.\r\n`import spacy`", "@MFatnassi, this issue and the corresponding fix only affect our Continuous Integration testing environment.\r\n\r\nNote that `datasets` does not depend on `spacy`." ]
"2022-12-29T18:58:44"
"2022-12-30T10:40:51"
"2022-12-29T21:00:27"
MEMBER
null
### Describe the bug While installing the dependencies, the CI raises a TypeError: ``` Traceback (most recent call last): File "/opt/hostedtoolcache/Python/3.7.15/x64/lib/python3.7/runpy.py", line 183, in _run_module_as_main mod_name, mod_spec, code = _get_module_details(mod_name, _Error) File "/opt/hostedtoolcache/Python/3.7.15/x64/lib/python3.7/runpy.py", line 142, in _get_module_details return _get_module_details(pkg_main_name, error) File "/opt/hostedtoolcache/Python/3.7.15/x64/lib/python3.7/runpy.py", line 109, in _get_module_details __import__(pkg_name) File "/opt/hostedtoolcache/Python/3.7.15/x64/lib/python3.7/site-packages/spacy/__init__.py", line 6, in <module> from .errors import setup_default_warnings File "/opt/hostedtoolcache/Python/3.7.15/x64/lib/python3.7/site-packages/spacy/errors.py", line 2, in <module> from .compat import Literal File "/opt/hostedtoolcache/Python/3.7.15/x64/lib/python3.7/site-packages/spacy/compat.py", line 3, in <module> from thinc.util import copy_array File "/opt/hostedtoolcache/Python/3.7.15/x64/lib/python3.7/site-packages/thinc/__init__.py", line 5, in <module> from .config import registry File "/opt/hostedtoolcache/Python/3.7.15/x64/lib/python3.7/site-packages/thinc/config.py", line 2, in <module> import confection File "/opt/hostedtoolcache/Python/3.7.15/x64/lib/python3.7/site-packages/confection/__init__.py", line 10, in <module> from pydantic import BaseModel, create_model, ValidationError, Extra File "pydantic/__init__.py", line 2, in init pydantic.__init__ File "pydantic/dataclasses.py", line 46, in init pydantic.dataclasses # | None | Attribute is set to None. | File "pydantic/main.py", line 121, in init pydantic.main TypeError: dataclass_transform() got an unexpected keyword argument 'field_specifiers' ``` See: https://github.com/huggingface/datasets/actions/runs/3793736481/jobs/6466356565 ### Steps to reproduce the bug ```shell pip install .[tests,metrics-tests] python -m spacy download en_core_web_sm ``` ### Expected behavior No error. ### Environment info See: https://github.com/huggingface/datasets/actions/runs/3793736481/jobs/6466356565
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5394/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5394/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5393
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5393/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5393/comments
https://api.github.com/repos/huggingface/datasets/issues/5393/events
https://github.com/huggingface/datasets/pull/5393
1,512,908,613
PR_kwDODunzps5GTg0a
5,393
Finish deprecating the fs argument
{ "login": "dconathan", "id": 15098095, "node_id": "MDQ6VXNlcjE1MDk4MDk1", "avatar_url": "https://avatars.githubusercontent.com/u/15098095?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dconathan", "html_url": "https://github.com/dconathan", "followers_url": "https://api.github.com/users/dconathan/followers", "following_url": "https://api.github.com/users/dconathan/following{/other_user}", "gists_url": "https://api.github.com/users/dconathan/gists{/gist_id}", "starred_url": "https://api.github.com/users/dconathan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dconathan/subscriptions", "organizations_url": "https://api.github.com/users/dconathan/orgs", "repos_url": "https://api.github.com/users/dconathan/repos", "events_url": "https://api.github.com/users/dconathan/events{/privacy}", "received_events_url": "https://api.github.com/users/dconathan/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-28T15:33:17"
"2023-01-18T12:42:33"
"2023-01-18T12:35:32"
CONTRIBUTOR
null
See #5385 for some discussion on this. The `fs=` arg was deprecated from `Dataset.save_to_disk` and `Dataset.load_from_disk` in `2.8.0` (to be removed in `3.0.0`). There are a few other places where the `fs=` arg was still used (functions/methods in `datasets.info` and `datasets.load`). This PR adds the same deprecation behavior, warnings, and the `storage_options=` arg to these functions and methods. One question: should the "deprecated" / "added" versions be `2.8.1` for the docs/warnings on these? Right now I'm going with "fs was deprecated in 2.8.0" but "storage_options= was added in 2.8.1" where appropriate. @mariosasko
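A sketch of the resulting call-site change is shown below; the bucket path is a placeholder, and the `storage_options=fs.storage_options` pattern follows the general deprecation guidance rather than code taken from this PR.

```python
# Placeholder bucket/paths; shows fs= being replaced by storage_options=.
import s3fs
from datasets import load_from_disk

fs = s3fs.S3FileSystem(anon=False)

# deprecated style (emits a FutureWarning)
ds = load_from_disk("my-bucket/my-dataset", fs=fs)

# new style: pass fsspec storage options instead of a filesystem instance
ds = load_from_disk("s3://my-bucket/my-dataset", storage_options=fs.storage_options)
```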
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5393/reactions", "total_count": 2, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 2, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5393/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5393", "html_url": "https://github.com/huggingface/datasets/pull/5393", "diff_url": "https://github.com/huggingface/datasets/pull/5393.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5393.patch", "merged_at": "2023-01-18T12:35:32" }
true
https://api.github.com/repos/huggingface/datasets/issues/5392
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5392/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5392/comments
https://api.github.com/repos/huggingface/datasets/issues/5392/events
https://github.com/huggingface/datasets/pull/5392
1,512,712,529
PR_kwDODunzps5GS2DF
5,392
Fix Colab notebook link
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-28T11:44:53"
"2023-01-03T15:36:14"
"2023-01-03T15:27:31"
MEMBER
null
Fix notebook link to open in Colab.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5392/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5392/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5392", "html_url": "https://github.com/huggingface/datasets/pull/5392", "diff_url": "https://github.com/huggingface/datasets/pull/5392.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5392.patch", "merged_at": "2023-01-03T15:27:31" }
true
https://api.github.com/repos/huggingface/datasets/issues/5390
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5390/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5390/comments
https://api.github.com/repos/huggingface/datasets/issues/5390/events
https://github.com/huggingface/datasets/issues/5390
1,509,357,553
I_kwDODunzps5Z9vfx
5,390
Error when pushing to the CI hub
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hmmm, git bisect tells me that the behavior is the same since https://github.com/huggingface/datasets/commit/67e65c90e9490810b89ee140da11fdd13c356c9c (3 Oct), i.e. https://github.com/huggingface/datasets/pull/4926", "Maybe related to the discussions in https://github.com/huggingface/datasets/pull/5196", "Maybe the current version of moonlanding in Hub CI is the issue.\r\n\r\nI relaunched tests that were working two days ago: now they are failing. https://github.com/huggingface/datasets-server/commit/746414449cae4b311733f8a76e5b3b4ca73b38a9 for example\r\n\r\ncc @huggingface/moon-landing ", "Hi! I don't think this has anything to do with `datasets`. Hub CI seems to be the culprit - the identical failure can be found in [this](https://github.com/huggingface/datasets/pull/5389) PR (with unrelated changes) opened today.", "OK! Thanks for looking at it. Closing then." ]
"2022-12-23T13:36:37"
"2022-12-23T20:29:02"
"2022-12-23T20:29:02"
CONTRIBUTOR
null
### Describe the bug Note that it's a special case where the Hub URL is "https://hub-ci.huggingface.co", which does not appear if we do the same on the Hub (https://huggingface.co). The call to `dataset.push_to_hub(` fails: ``` Pushing dataset shards to the dataset hub: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:01<00:00, 1.93s/it] Traceback (most recent call last): File "reproduce_hubci.py", line 16, in <module> dataset.push_to_hub(repo_id=repo_id, private=False, token=USER_TOKEN, embed_external_files=True) File "/home/slesage/hf/datasets/src/datasets/arrow_dataset.py", line 5025, in push_to_hub HfApi(endpoint=config.HF_ENDPOINT).upload_file( File "/home/slesage/.pyenv/versions/datasets/lib/python3.8/site-packages/huggingface_hub/hf_api.py", line 1346, in upload_file raise err File "/home/slesage/.pyenv/versions/datasets/lib/python3.8/site-packages/huggingface_hub/hf_api.py", line 1337, in upload_file r.raise_for_status() File "/home/slesage/.pyenv/versions/datasets/lib/python3.8/site-packages/requests/models.py", line 953, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 400 Client Error: Bad Request for url: https://hub-ci.huggingface.co/api/datasets/__DUMMY_DATASETS_SERVER_USER__/bug-16718047265472/upload/main/README.md ``` ### Steps to reproduce the bug ```python # reproduce.py from datasets import Dataset import time USER = "__DUMMY_DATASETS_SERVER_USER__" USER_TOKEN = "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" dataset = Dataset.from_dict({"a": [1, 2, 3]}) repo_id = f"{USER}/bug-{int(time.time() * 10e3)}" dataset.push_to_hub(repo_id=repo_id, private=False, token=USER_TOKEN, embed_external_files=True) ``` ```bash $ HF_ENDPOINT="https://hub-ci.huggingface.co" python reproduce.py ``` ### Expected behavior No error and the dataset should be uploaded to the Hub with the README file (which generates the error). ### Environment info - `datasets` version: 2.8.0 - Platform: Linux-5.15.0-1026-aws-x86_64-with-glibc2.35 - Python version: 3.9.15 - PyArrow version: 7.0.0 - Pandas version: 1.5.2
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5390/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5390/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5389
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5389/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5389/comments
https://api.github.com/repos/huggingface/datasets/issues/5389/events
https://github.com/huggingface/datasets/pull/5389
1,509,348,626
PR_kwDODunzps5GHsOo
5,389
Fix link in `load_dataset` docstring
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-23T13:26:31"
"2023-01-25T19:00:43"
"2023-01-24T16:33:38"
CONTRIBUTOR
null
Fix https://github.com/huggingface/datasets/issues/5387, fix https://github.com/huggingface/datasets/issues/4566
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5389/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5389/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5389", "html_url": "https://github.com/huggingface/datasets/pull/5389", "diff_url": "https://github.com/huggingface/datasets/pull/5389.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5389.patch", "merged_at": "2023-01-24T16:33:38" }
true
https://api.github.com/repos/huggingface/datasets/issues/5388
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5388/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5388/comments
https://api.github.com/repos/huggingface/datasets/issues/5388/events
https://github.com/huggingface/datasets/issues/5388
1,509,042,348
I_kwDODunzps5Z8iis
5,388
Getting ValueError while loading a dataset
{ "login": "valmetisrinivas", "id": 51160232, "node_id": "MDQ6VXNlcjUxMTYwMjMy", "avatar_url": "https://avatars.githubusercontent.com/u/51160232?v=4", "gravatar_id": "", "url": "https://api.github.com/users/valmetisrinivas", "html_url": "https://github.com/valmetisrinivas", "followers_url": "https://api.github.com/users/valmetisrinivas/followers", "following_url": "https://api.github.com/users/valmetisrinivas/following{/other_user}", "gists_url": "https://api.github.com/users/valmetisrinivas/gists{/gist_id}", "starred_url": "https://api.github.com/users/valmetisrinivas/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/valmetisrinivas/subscriptions", "organizations_url": "https://api.github.com/users/valmetisrinivas/orgs", "repos_url": "https://api.github.com/users/valmetisrinivas/repos", "events_url": "https://api.github.com/users/valmetisrinivas/events{/privacy}", "received_events_url": "https://api.github.com/users/valmetisrinivas/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi! I can't reproduce this error locally (Mac) or in Colab. What version of `datasets` are you using?", "Hi [mariosasko](https://github.com/mariosasko), the datasets version is '2.8.0'.", "@valmetisrinivas you get that error because you imported `datasets` (and thus `fsspec`) before installing `zstandard`.\r\n\r\nPlease, restart your Colab runtime and execute the install commands before importing `datasets`:\r\n```python\r\n!pip install datasets\r\n!pip install zstandard\r\n\r\nfrom datasets import load_dataset\r\n\r\nds = load_dataset(\r\n \"json\",\r\n data_files=\"https://the-eye.eu/public/AI/pile_preliminary_components/FreeLaw_Opinions.jsonl.zst\",\r\n split=\"train\",\r\n streaming=True,\r\n)\r\nnext(iter(ds))\r\n```", "> @valmetisrinivas you get that error because you imported `datasets` (and thus `fsspec`) before installing `zstandard`.\r\n> \r\n> Please, restart your Colab runtime and execute the install commands before importing `datasets`:\r\n> \r\n> ```python\r\n> !pip install datasets\r\n> !pip install zstandard\r\n> \r\n> from datasets import load_dataset\r\n> \r\n> ds = load_dataset(\r\n> \"json\",\r\n> data_files=\"https://the-eye.eu/public/AI/pile_preliminary_components/FreeLaw_Opinions.jsonl.zst\",\r\n> split=\"train\",\r\n> streaming=True,\r\n> )\r\n> next(iter(ds))\r\n> ```\r\n\r\nI guess that was the problem, importing datasets before the installation of zstandard. Thank you for the feedback. " ]
"2022-12-23T08:16:43"
"2022-12-29T08:36:33"
"2022-12-27T17:59:09"
NONE
null
### Describe the bug I am trying to load a dataset using Hugging Face Datasets load_dataset method. I am getting the value error as show below. Can someone help with this? I am using Windows laptop and Google Colab notebook. ``` WARNING:datasets.builder:Using custom data configuration default-a1d9e8eaedd958cd --------------------------------------------------------------------------- ValueError Traceback (most recent call last) [<ipython-input-12-5b4fdcb8e6d5>](https://localhost:8080/#) in <module> 6 ) 7 ----> 8 next(iter(law_dataset_streamed)) 17 frames [/usr/local/lib/python3.8/dist-packages/fsspec/core.py](https://localhost:8080/#) in get_compression(urlpath, compression) 485 compression = infer_compression(urlpath) 486 if compression is not None and compression not in compr: --> 487 raise ValueError("Compression type %s not supported" % compression) 488 return compression 489 ValueError: Compression type zstd not supported ``` ### Steps to reproduce the bug ``` !pip install zstandard from datasets import load_dataset lds = load_dataset( "json", data_files="https://the-eye.eu/public/AI/pile_preliminary_components/FreeLaw_Opinions.jsonl.zst", split="train", streaming=True, ) ``` ### Expected behavior I expect an iterable object as the output 'lds' to be created. ### Environment info Windows laptop with Google Colab notebook
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5388/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5388/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5387
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5387/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5387/comments
https://api.github.com/repos/huggingface/datasets/issues/5387/events
https://github.com/huggingface/datasets/issues/5387
1,508,740,177
I_kwDODunzps5Z7YxR
5,387
Missing documentation page: improve-performance
{ "login": "astariul", "id": 43774355, "node_id": "MDQ6VXNlcjQzNzc0MzU1", "avatar_url": "https://avatars.githubusercontent.com/u/43774355?v=4", "gravatar_id": "", "url": "https://api.github.com/users/astariul", "html_url": "https://github.com/astariul", "followers_url": "https://api.github.com/users/astariul/followers", "following_url": "https://api.github.com/users/astariul/following{/other_user}", "gists_url": "https://api.github.com/users/astariul/gists{/gist_id}", "starred_url": "https://api.github.com/users/astariul/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/astariul/subscriptions", "organizations_url": "https://api.github.com/users/astariul/orgs", "repos_url": "https://api.github.com/users/astariul/repos", "events_url": "https://api.github.com/users/astariul/events{/privacy}", "received_events_url": "https://api.github.com/users/astariul/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi! Our documentation builder does not support links to sections, hence the bug. This is the link it should point to https://huggingface.co/docs/datasets/v2.8.0/en/cache#improve-performance." ]
"2022-12-23T01:12:57"
"2023-01-24T16:33:40"
"2023-01-24T16:33:40"
NONE
null
### Describe the bug Trying to access https://huggingface.co/docs/datasets/v2.8.0/en/package_reference/cache#improve-performance, the page is missing. The link is in here : https://huggingface.co/docs/datasets/v2.8.0/en/package_reference/loading_methods#datasets.load_dataset.keep_in_memory ### Steps to reproduce the bug Access the page and see it's missing. ### Expected behavior Not missing page ### Environment info Doesn't matter
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5387/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5387/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5386
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5386/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5386/comments
https://api.github.com/repos/huggingface/datasets/issues/5386/events
https://github.com/huggingface/datasets/issues/5386
1,508,592,918
I_kwDODunzps5Z600W
5,386
`max_shard_size` in `datasets.push_to_hub()` breaks with large files
{ "login": "salieri", "id": 1086393, "node_id": "MDQ6VXNlcjEwODYzOTM=", "avatar_url": "https://avatars.githubusercontent.com/u/1086393?v=4", "gravatar_id": "", "url": "https://api.github.com/users/salieri", "html_url": "https://github.com/salieri", "followers_url": "https://api.github.com/users/salieri/followers", "following_url": "https://api.github.com/users/salieri/following{/other_user}", "gists_url": "https://api.github.com/users/salieri/gists{/gist_id}", "starred_url": "https://api.github.com/users/salieri/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/salieri/subscriptions", "organizations_url": "https://api.github.com/users/salieri/orgs", "repos_url": "https://api.github.com/users/salieri/repos", "events_url": "https://api.github.com/users/salieri/events{/privacy}", "received_events_url": "https://api.github.com/users/salieri/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi! \r\n\r\nThis behavior stems from the fact that we don't always embed image bytes in the underlying arrow table, which can lead to bad size estimation (we use the first 1000 table rows to [estimate](https://github.com/huggingface/datasets/blob/9a7272cd4222383a5b932b0083a4cc173fda44e8/src/datasets/arrow_dataset.py#L4627) the external file size). We plan to address this in the next major release by always embedding external bytes. In the meantime, you can either shuffle the dataset with `.shuffle().flatten_indices()` to make the estimation more precise or embed the bytes in the table like so:\r\n```python\r\nfrom datasets.table import embed_table_storage\r\nformat = ds.format\r\nds = ds.with_format(\"arrow\")\r\nds = ds.map(embed_table_storage, batched=True)\r\nds = ds.with_format(**format)\r\n...\r\nds.push_to_hub(...)\r\n```", "Embedding the bytes worked like charm. Thanks @mariosasko!" ]
"2022-12-22T21:50:58"
"2022-12-26T23:45:51"
"2022-12-26T23:45:51"
NONE
null
### Describe the bug `max_shard_size` parameter for `datasets.push_to_hub()` works unreliably with large files, generating shard files that are way past the specified limit. In my private dataset, which contains unprocessed images of all sizes (up to `~100MB` per file), I've encountered cases where `max_shard_size='100MB'` results in shard files that are `>2GB` in size. Setting `max_shard_size` to another value, such as `1GB` or `500MB` does not fix this problem. **The real problem is this:** When the shard file size grows too big, the entire dataset breaks because of #4721 and ultimately https://issues.apache.org/jira/browse/ARROW-5030. Since `max_shard_size` does not let one accurately control the size of the shard files, it becomes very easy to build a large dataset without any warnings that it will be broken -- even when you think you are mitigating this problem by setting `max_shard_size`. ``` File " /path/to/sd-test-suite-v1/venv/lib/site-packages/datasets/builder.py", line 1763, in _prepare_split_single for _, table in generator: File " /path/to/sd-test-suite-v1/venv/lib/site-packages/datasets/packaged_modules/parquet/parquet.py", line 69, in _generate_tables for batch_idx, record_batch in enumerate( File "pyarrow/_parquet.pyx", line 1323, in iter_batches File "pyarrow/error.pxi", line 121, in pyarrow.lib.check_status pyarrow.lib.ArrowNotImplementedError: Nested data conversions not implemented for chunked array outputs ``` ### Steps to reproduce the bug 1. Clone [example repo](https://github.com/salieri/hf-dataset-shard-size-bug) 2. Follow steps in [README.md](https://github.com/salieri/hf-dataset-shard-size-bug/blob/main/README.md) 3. After uploading the dataset, you will see that the shard file size varies between `30MB` and `200MB` -- way beyond the `max_shard_size='75MB'` limit (example: `train-00003-of-00131...` is `155MB` in [here](https://huggingface.co/datasets/slri/shard-size-test/tree/main/data)) (Note that this example repo does not generate shard files that are so large that they would trigger #4721) ### Expected behavior The shard file size should remain below or equal to `max_shard_size`. ### Environment info - `datasets` version: 2.8.0 - Platform: Linux-5.10.157-139.675.amzn2.aarch64-aarch64-with-glibc2.17 - Python version: 3.7.15 - PyArrow version: 10.0.1 - Pandas version: 1.3.5
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5386/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5386/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5385
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5385/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5385/comments
https://api.github.com/repos/huggingface/datasets/issues/5385/events
https://github.com/huggingface/datasets/issues/5385
1,508,535,532
I_kwDODunzps5Z6mzs
5,385
Is `fs=` deprecated in `load_from_disk()` as well?
{ "login": "dconathan", "id": 15098095, "node_id": "MDQ6VXNlcjE1MDk4MDk1", "avatar_url": "https://avatars.githubusercontent.com/u/15098095?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dconathan", "html_url": "https://github.com/dconathan", "followers_url": "https://api.github.com/users/dconathan/followers", "following_url": "https://api.github.com/users/dconathan/following{/other_user}", "gists_url": "https://api.github.com/users/dconathan/gists{/gist_id}", "starred_url": "https://api.github.com/users/dconathan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dconathan/subscriptions", "organizations_url": "https://api.github.com/users/dconathan/orgs", "repos_url": "https://api.github.com/users/dconathan/repos", "events_url": "https://api.github.com/users/dconathan/events{/privacy}", "received_events_url": "https://api.github.com/users/dconathan/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi! Yes, we should deprecate the `fs` param here. Would you be interested in submitting a PR? ", "> Hi! Yes, we should deprecate the `fs` param here. Would you be interested in submitting a PR?\r\n\r\nYeah I can do that sometime next week. Should the storage_options be a new arg here? I’ll look around for anywhere else where fs is an arg.", "Closed by #5393." ]
"2022-12-22T21:00:45"
"2023-01-23T10:50:05"
"2023-01-23T10:50:04"
CONTRIBUTOR
null
### Describe the bug The `fs=` argument was deprecated from `Dataset.save_to_disk` and `Dataset.load_from_disk` in favor of automagically figuring it out via fsspec: https://github.com/huggingface/datasets/blob/9a7272cd4222383a5b932b0083a4cc173fda44e8/src/datasets/arrow_dataset.py#L1339-L1340 Is there a reason the same thing shouldn't also apply to `datasets.load.load_from_disk()`? https://github.com/huggingface/datasets/blob/9a7272cd4222383a5b932b0083a4cc173fda44e8/src/datasets/load.py#L1779 ### Steps to reproduce the bug n/a ### Expected behavior n/a ### Environment info n/a
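A hedged sketch of the deprecation pattern being discussed, mirroring what was done for `Dataset.load_from_disk`; the exact signature, warning text, and `storage_options` handling below are assumptions, not the merged patch:

```python
# Hypothetical sketch: deprecate `fs` in favor of `storage_options` in datasets.load.load_from_disk.
import warnings

def load_from_disk(dataset_path, fs="deprecated", keep_in_memory=None, storage_options=None):
    if fs != "deprecated":
        warnings.warn(
            "'fs' was deprecated in favor of 'storage_options'; pass e.g. storage_options=fs.storage_options",
            FutureWarning,
        )
        storage_options = fs.storage_options
    ...  # resolve the filesystem from dataset_path + storage_options via fsspec, as save_to_disk does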
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5385/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5385/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5384
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5384/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5384/comments
https://api.github.com/repos/huggingface/datasets/issues/5384/events
https://github.com/huggingface/datasets/pull/5384
1,508,152,598
PR_kwDODunzps5GDmR6
5,384
Handle 0-dim tensors in `cast_to_python_objects`
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-22T16:15:30"
"2023-01-13T16:10:15"
"2023-01-13T16:00:52"
CONTRIBUTOR
null
Fix #5229
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5384/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5384/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5384", "html_url": "https://github.com/huggingface/datasets/pull/5384", "diff_url": "https://github.com/huggingface/datasets/pull/5384.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5384.patch", "merged_at": "2023-01-13T16:00:52" }
true
https://api.github.com/repos/huggingface/datasets/issues/5382
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5382/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5382/comments
https://api.github.com/repos/huggingface/datasets/issues/5382/events
https://github.com/huggingface/datasets/pull/5382
1,504,788,691
PR_kwDODunzps5F4Q0V
5,382
Raise from disconnect error in xopen
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-20T15:52:44"
"2023-01-26T09:51:13"
"2023-01-26T09:42:45"
MEMBER
null
This way we can know the cause of the disconnect. Related to https://github.com/huggingface/datasets/issues/5374
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5382/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5382/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5382", "html_url": "https://github.com/huggingface/datasets/pull/5382", "diff_url": "https://github.com/huggingface/datasets/pull/5382.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5382.patch", "merged_at": "2023-01-26T09:42:45" }
true
https://api.github.com/repos/huggingface/datasets/issues/5381
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5381/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5381/comments
https://api.github.com/repos/huggingface/datasets/issues/5381/events
https://github.com/huggingface/datasets/issues/5381
1,504,498,387
I_kwDODunzps5ZrNLT
5,381
Wrong URL for the_pile dataset
{ "login": "LeoGrin", "id": 45738728, "node_id": "MDQ6VXNlcjQ1NzM4NzI4", "avatar_url": "https://avatars.githubusercontent.com/u/45738728?v=4", "gravatar_id": "", "url": "https://api.github.com/users/LeoGrin", "html_url": "https://github.com/LeoGrin", "followers_url": "https://api.github.com/users/LeoGrin/followers", "following_url": "https://api.github.com/users/LeoGrin/following{/other_user}", "gists_url": "https://api.github.com/users/LeoGrin/gists{/gist_id}", "starred_url": "https://api.github.com/users/LeoGrin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LeoGrin/subscriptions", "organizations_url": "https://api.github.com/users/LeoGrin/orgs", "repos_url": "https://api.github.com/users/LeoGrin/repos", "events_url": "https://api.github.com/users/LeoGrin/events{/privacy}", "received_events_url": "https://api.github.com/users/LeoGrin/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi! This error can happen if there is a local file/folder with the same name as the requested dataset. And to avoid it, rename the local file/folder.\r\n\r\nSoon, it will be possible to explicitly request a Hub dataset as follows:https://github.com/huggingface/datasets/issues/5228#issuecomment-1313494020" ]
"2022-12-20T12:40:14"
"2023-02-15T16:24:57"
"2023-02-15T16:24:57"
NONE
null
### Describe the bug When trying to load the `the_pile` dataset from the library, I get a `FileNotFound` error. ### Steps to reproduce the bug Steps to reproduce: Run: ``` from datasets import load_dataset dataset = load_dataset("the_pile") ``` I get the output: "name": "FileNotFoundError", "message": "Unable to resolve any data file that matches '['**']' at /storage/store/work/lgrinszt/memorization/the_pile with any supported extension ['csv', 'tsv', 'json', 'jsonl', 'parquet', 'txt', 'blp', 'bmp', 'dib', 'bufr', 'cur', 'pcx', 'dcx', 'dds', 'ps', 'eps', 'fit', 'fits', 'fli', 'flc', 'ftc', 'ftu', 'gbr', 'gif', 'grib', 'h5', 'hdf', 'png', 'apng', 'jp2', 'j2k', 'jpc', 'jpf', 'jpx', 'j2c', 'icns', 'ico', 'im', 'iim', 'tif', 'tiff', 'jfif', 'jpe', 'jpg', 'jpeg', 'mpg', 'mpeg', 'msp', 'pcd', 'pxr', 'pbm', 'pgm', 'ppm', 'pnm', 'psd', 'bw', 'rgb', 'rgba', 'sgi', 'ras', 'tga', 'icb', 'vda', 'vst', 'webp', 'wmf', 'emf', 'xbm', 'xpm', 'BLP', 'BMP', 'DIB', 'BUFR', 'CUR', 'PCX', 'DCX', 'DDS', 'PS', 'EPS', 'FIT', 'FITS', 'FLI', 'FLC', 'FTC', 'FTU', 'GBR', 'GIF', 'GRIB', 'H5', 'HDF', 'PNG', 'APNG', 'JP2', 'J2K', 'JPC', 'JPF', 'JPX', 'J2C', 'ICNS', 'ICO', 'IM', 'IIM', 'TIF', 'TIFF', 'JFIF', 'JPE', 'JPG', 'JPEG', 'MPG', 'MPEG', 'MSP', 'PCD', 'PXR', 'PBM', 'PGM', 'PPM', 'PNM', 'PSD', 'BW', 'RGB', 'RGBA', 'SGI', 'RAS', 'TGA', 'ICB', 'VDA', 'VST', 'WEBP', 'WMF', 'EMF', 'XBM', 'XPM', 'aiff', 'au', 'avr', 'caf', 'flac', 'htk', 'svx', 'mat4', 'mat5', 'mpc2k', 'ogg', 'paf', 'pvf', 'raw', 'rf64', 'sd2', 'sds', 'ircam', 'voc', 'w64', 'wav', 'nist', 'wavex', 'wve', 'xi', 'mp3', 'opus', 'AIFF', 'AU', 'AVR', 'CAF', 'FLAC', 'HTK', 'SVX', 'MAT4', 'MAT5', 'MPC2K', 'OGG', 'PAF', 'PVF', 'RAW', 'RF64', 'SD2', 'SDS', 'IRCAM', 'VOC', 'W64', 'WAV', 'NIST', 'WAVEX', 'WVE', 'XI', 'MP3', 'OPUS', 'zip']" ### Expected behavior The `the_pile` dataset should be downloaded. ### Environment info - `datasets` version: 2.7.1 - Platform: Linux-4.15.0-112-generic-x86_64-with-glibc2.27 - Python version: 3.10.8 - PyArrow version: 10.0.1 - Pandas version: 1.5.2
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5381/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5381/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5379
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5379/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5379/comments
https://api.github.com/repos/huggingface/datasets/issues/5379/events
https://github.com/huggingface/datasets/pull/5379
1,504,010,639
PR_kwDODunzps5F1r2k
5,379
feat: depth estimation dataset guide.
{ "login": "sayakpaul", "id": 22957388, "node_id": "MDQ6VXNlcjIyOTU3Mzg4", "avatar_url": "https://avatars.githubusercontent.com/u/22957388?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sayakpaul", "html_url": "https://github.com/sayakpaul", "followers_url": "https://api.github.com/users/sayakpaul/followers", "following_url": "https://api.github.com/users/sayakpaul/following{/other_user}", "gists_url": "https://api.github.com/users/sayakpaul/gists{/gist_id}", "starred_url": "https://api.github.com/users/sayakpaul/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sayakpaul/subscriptions", "organizations_url": "https://api.github.com/users/sayakpaul/orgs", "repos_url": "https://api.github.com/users/sayakpaul/repos", "events_url": "https://api.github.com/users/sayakpaul/events{/privacy}", "received_events_url": "https://api.github.com/users/sayakpaul/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "sayakpaul", "id": 22957388, "node_id": "MDQ6VXNlcjIyOTU3Mzg4", "avatar_url": "https://avatars.githubusercontent.com/u/22957388?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sayakpaul", "html_url": "https://github.com/sayakpaul", "followers_url": "https://api.github.com/users/sayakpaul/followers", "following_url": "https://api.github.com/users/sayakpaul/following{/other_user}", "gists_url": "https://api.github.com/users/sayakpaul/gists{/gist_id}", "starred_url": "https://api.github.com/users/sayakpaul/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sayakpaul/subscriptions", "organizations_url": "https://api.github.com/users/sayakpaul/orgs", "repos_url": "https://api.github.com/users/sayakpaul/repos", "events_url": "https://api.github.com/users/sayakpaul/events{/privacy}", "received_events_url": "https://api.github.com/users/sayakpaul/received_events", "type": "User", "site_admin": false }
[ { "login": "sayakpaul", "id": 22957388, "node_id": "MDQ6VXNlcjIyOTU3Mzg4", "avatar_url": "https://avatars.githubusercontent.com/u/22957388?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sayakpaul", "html_url": "https://github.com/sayakpaul", "followers_url": "https://api.github.com/users/sayakpaul/followers", "following_url": "https://api.github.com/users/sayakpaul/following{/other_user}", "gists_url": "https://api.github.com/users/sayakpaul/gists{/gist_id}", "starred_url": "https://api.github.com/users/sayakpaul/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sayakpaul/subscriptions", "organizations_url": "https://api.github.com/users/sayakpaul/orgs", "repos_url": "https://api.github.com/users/sayakpaul/repos", "events_url": "https://api.github.com/users/sayakpaul/events{/privacy}", "received_events_url": "https://api.github.com/users/sayakpaul/received_events", "type": "User", "site_admin": false } ]
null
[]
"2022-12-20T05:32:11"
"2023-01-13T12:30:31"
"2023-01-13T12:23:34"
MEMBER
null
This PR adds a guide for prepping datasets for depth estimation. PR to add documentation images is up here: https://huggingface.co/datasets/huggingface/documentation-images/discussions/22
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5379/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 1, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5379/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5379", "html_url": "https://github.com/huggingface/datasets/pull/5379", "diff_url": "https://github.com/huggingface/datasets/pull/5379.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5379.patch", "merged_at": "2023-01-13T12:23:34" }
true
https://api.github.com/repos/huggingface/datasets/issues/5378
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5378/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5378/comments
https://api.github.com/repos/huggingface/datasets/issues/5378/events
https://github.com/huggingface/datasets/issues/5378
1,503,887,508
I_kwDODunzps5Zo4CU
5,378
The dataset "the_pile", subset "enron_emails" , load_dataset() failure
{ "login": "shaoyuta", "id": 52023469, "node_id": "MDQ6VXNlcjUyMDIzNDY5", "avatar_url": "https://avatars.githubusercontent.com/u/52023469?v=4", "gravatar_id": "", "url": "https://api.github.com/users/shaoyuta", "html_url": "https://github.com/shaoyuta", "followers_url": "https://api.github.com/users/shaoyuta/followers", "following_url": "https://api.github.com/users/shaoyuta/following{/other_user}", "gists_url": "https://api.github.com/users/shaoyuta/gists{/gist_id}", "starred_url": "https://api.github.com/users/shaoyuta/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/shaoyuta/subscriptions", "organizations_url": "https://api.github.com/users/shaoyuta/orgs", "repos_url": "https://api.github.com/users/shaoyuta/repos", "events_url": "https://api.github.com/users/shaoyuta/events{/privacy}", "received_events_url": "https://api.github.com/users/shaoyuta/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Thanks for reporting @shaoyuta. We are investigating it.\r\n\r\nWe are transferring the issue to \"the_pile\" Community tab on the Hub: https://huggingface.co/datasets/the_pile/discussions/4" ]
"2022-12-20T02:19:13"
"2022-12-20T07:52:54"
"2022-12-20T07:52:54"
NONE
null
### Describe the bug Running "datasets.load_dataset("the_pile","enron_emails")" fails ![image](https://user-images.githubusercontent.com/52023469/208565302-cfab7b89-0b97-4fa6-a5ba-c11b0b629b1a.png) ### Steps to reproduce the bug Run the code below in the Python CLI: >>> import datasets >>> datasets.load_dataset("the_pile","enron_emails") ### Expected behavior The dataset "the_pile", subset "enron_emails", loads successfully. ### Environment info - `datasets` version: 2.7.1 - Platform: Linux-5.15.0-53-generic-x86_64-with-glibc2.35 - Python version: 3.10.6 - PyArrow version: 10.0.0 - Pandas version: 1.4.3
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5378/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5378/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5377
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5377/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5377/comments
https://api.github.com/repos/huggingface/datasets/issues/5377/events
https://github.com/huggingface/datasets/pull/5377
1,503,477,833
PR_kwDODunzps5Fz5lw
5,377
Add a parallel implementation of to_tf_dataset()
{ "login": "Rocketknight1", "id": 12866554, "node_id": "MDQ6VXNlcjEyODY2NTU0", "avatar_url": "https://avatars.githubusercontent.com/u/12866554?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Rocketknight1", "html_url": "https://github.com/Rocketknight1", "followers_url": "https://api.github.com/users/Rocketknight1/followers", "following_url": "https://api.github.com/users/Rocketknight1/following{/other_user}", "gists_url": "https://api.github.com/users/Rocketknight1/gists{/gist_id}", "starred_url": "https://api.github.com/users/Rocketknight1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Rocketknight1/subscriptions", "organizations_url": "https://api.github.com/users/Rocketknight1/orgs", "repos_url": "https://api.github.com/users/Rocketknight1/repos", "events_url": "https://api.github.com/users/Rocketknight1/events{/privacy}", "received_events_url": "https://api.github.com/users/Rocketknight1/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-19T19:40:27"
"2023-01-25T16:28:44"
"2023-01-25T16:21:40"
MEMBER
null
Hey all! Here's a first draft of the PR to add a multiprocessing implementation for `to_tf_dataset()`. It worked in some quick testing for me, but obviously I need to do some much more rigorous testing/benchmarking, and add some proper library tests. The core idea is that we do everything using `multiprocessing` and `numpy`, and just wrap a `tf.data.Dataset` around the output. We could also rewrite the existing single-threaded implementation based on this code, which might simplify it a bit. Checklist: - [X] Add initial draft - [x] Check that it works regardless of whether the `collate_fn` or dataset returns `tf` or `np` arrays - [x] Check that it works with `tf.string` return data - [x] Check indices are correctly reshuffled each epoch - [x] Make sure workers don't try to initialize a GPU device!! - [x] Check `fit()` with multiple epochs works fine and that the progress bar is correct - [x] Check there are no memory leaks or zombie processes - [x] Benchmark performance - [x] Tweak params for dataset inference - can we speed things up there a bit? - [x] Add tests to the library - [x] Add a PR to `transformers` to expose the `num_workers` argument via `prepare_tf_dataset` (will merge after this one is released) - [x] Stop TF console spam!! (almost) - [x] Add a method for creating SHM that doesn't crash if it was left and still linked - [x] Add a barrier for Py <= 3.7 because it doesn't support SharedMemory - [x] Support string dtypes by converting them into fixed-width character arrays
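A hedged usage sketch of the feature this checklist describes; the toy dataset and column names below are illustrative, and only `num_workers` is assumed to be the new argument exposed by this PR:

```python
# Illustrative only: select the multiprocessing loading path via the new `num_workers` argument.
from datasets import Dataset

ds = Dataset.from_dict({"input_ids": [[1, 2, 3]] * 64, "labels": [0] * 64})

tf_dataset = ds.to_tf_dataset(
    columns=["input_ids"],
    label_cols=["labels"],
    batch_size=16,
    shuffle=True,
    num_workers=4,  # >0 would pick the parallel implementation described above
)
```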
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5377/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5377/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5377", "html_url": "https://github.com/huggingface/datasets/pull/5377", "diff_url": "https://github.com/huggingface/datasets/pull/5377.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5377.patch", "merged_at": "2023-01-25T16:21:40" }
true
https://api.github.com/repos/huggingface/datasets/issues/5376
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5376/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5376/comments
https://api.github.com/repos/huggingface/datasets/issues/5376/events
https://github.com/huggingface/datasets/pull/5376
1,502,730,559
PR_kwDODunzps5FxWkM
5,376
set dev version
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-19T10:56:56"
"2022-12-19T11:01:55"
"2022-12-19T10:57:16"
MEMBER
null
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5376/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5376/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5376", "html_url": "https://github.com/huggingface/datasets/pull/5376", "diff_url": "https://github.com/huggingface/datasets/pull/5376.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5376.patch", "merged_at": "2022-12-19T10:57:16" }
true
https://api.github.com/repos/huggingface/datasets/issues/5375
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5375/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5375/comments
https://api.github.com/repos/huggingface/datasets/issues/5375/events
https://github.com/huggingface/datasets/pull/5375
1,502,720,404
PR_kwDODunzps5FxUbG
5,375
Release: 2.8.0
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-19T10:48:26"
"2022-12-19T10:55:43"
"2022-12-19T10:53:15"
MEMBER
null
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5375/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5375/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5375", "html_url": "https://github.com/huggingface/datasets/pull/5375", "diff_url": "https://github.com/huggingface/datasets/pull/5375.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5375.patch", "merged_at": "2022-12-19T10:53:15" }
true
https://api.github.com/repos/huggingface/datasets/issues/5373
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5373/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5373/comments
https://api.github.com/repos/huggingface/datasets/issues/5373/events
https://github.com/huggingface/datasets/pull/5373
1,501,484,197
PR_kwDODunzps5FtRU4
5,373
Simplify skipping
{ "login": "Muennighoff", "id": 62820084, "node_id": "MDQ6VXNlcjYyODIwMDg0", "avatar_url": "https://avatars.githubusercontent.com/u/62820084?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Muennighoff", "html_url": "https://github.com/Muennighoff", "followers_url": "https://api.github.com/users/Muennighoff/followers", "following_url": "https://api.github.com/users/Muennighoff/following{/other_user}", "gists_url": "https://api.github.com/users/Muennighoff/gists{/gist_id}", "starred_url": "https://api.github.com/users/Muennighoff/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Muennighoff/subscriptions", "organizations_url": "https://api.github.com/users/Muennighoff/orgs", "repos_url": "https://api.github.com/users/Muennighoff/repos", "events_url": "https://api.github.com/users/Muennighoff/events{/privacy}", "received_events_url": "https://api.github.com/users/Muennighoff/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-17T17:23:52"
"2022-12-18T21:43:31"
"2022-12-18T21:40:21"
CONTRIBUTOR
null
Was hoping to find a way to speed up the skipping as I'm running into bottlenecks skipping 100M examples on C4 (it takes 12 hours to skip), but didn't find anything better than this small change :( Maybe there's a way to directly skip whole shards to speed it up? 🧐
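For context, a hedged sketch of the pattern this change tries to speed up; the dataset name and skip count come from the description above, and this is not code from the PR itself:

```python
# Skipping a streamed dataset currently iterates over (and discards) examples one by one.
from datasets import load_dataset

ds = load_dataset("c4", "en", split="train", streaming=True)
ds = ds.skip(100_000_000)  # this linear scan is the 12-hour bottleneck mentioned above
next(iter(ds))
```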
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5373/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5373/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5373", "html_url": "https://github.com/huggingface/datasets/pull/5373", "diff_url": "https://github.com/huggingface/datasets/pull/5373.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5373.patch", "merged_at": "2022-12-18T21:40:21" }
true
https://api.github.com/repos/huggingface/datasets/issues/5372
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5372/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5372/comments
https://api.github.com/repos/huggingface/datasets/issues/5372/events
https://github.com/huggingface/datasets/pull/5372
1,501,377,802
PR_kwDODunzps5Fs9w5
5,372
Fix streaming pandas.read_excel
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-17T12:58:52"
"2023-01-06T11:50:58"
"2023-01-06T11:43:37"
MEMBER
null
This PR fixes `xpandas_read_excel`: - Support passing a path string, besides a file-like object - Support passing `use_auth_token` - First assume the host server supports HTTP range requests; only if a ValueError is thrown (Cannot seek streaming HTTP file) does it fall back to the previous behavior (see [#3355](https://github.com/huggingface/datasets/pull/3355)). Fix https://huggingface.co/datasets/bigbio/meqsum/discussions/1 Fix: - https://github.com/bigscience-workshop/biomedical/issues/801 Related to: - #3355
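A rough, hedged sketch of the fallback logic described above (not the actual patch); `xopen` is datasets' streaming-aware open, and its import path and keyword usage below are assumptions:

```python
import pandas as pd
from io import BytesIO
from datasets.download.streaming_download_manager import xopen  # assumed location of xopen

def xpandas_read_excel(filepath_or_buffer, use_auth_token=None, **kwargs):
    if not isinstance(filepath_or_buffer, str):  # file-like objects are passed through unchanged
        return pd.read_excel(filepath_or_buffer, **kwargs)
    try:
        # first assume the host supports HTTP range requests and stream directly
        return pd.read_excel(xopen(filepath_or_buffer, "rb", use_auth_token=use_auth_token), **kwargs)
    except ValueError:  # "Cannot seek streaming HTTP file"
        # fall back to the previous behavior: read the whole file into memory
        data = xopen(filepath_or_buffer, "rb", use_auth_token=use_auth_token).read()
        return pd.read_excel(BytesIO(data), **kwargs)
```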
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5372/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5372/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5372", "html_url": "https://github.com/huggingface/datasets/pull/5372", "diff_url": "https://github.com/huggingface/datasets/pull/5372.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5372.patch", "merged_at": "2023-01-06T11:43:37" }
true
https://api.github.com/repos/huggingface/datasets/issues/5369
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5369/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5369/comments
https://api.github.com/repos/huggingface/datasets/issues/5369/events
https://github.com/huggingface/datasets/pull/5369
1,500,622,276
PR_kwDODunzps5Fqaj-
5,369
Distributed support
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-16T17:43:47"
"2023-01-16T13:36:12"
"2023-01-16T13:33:32"
MEMBER
null
To split your dataset across your training nodes, you can use the new [`datasets.distributed.split_dataset_by_node`]: ```python import os from datasets.distributed import split_dataset_by_node ds = split_dataset_by_node(ds, rank=int(os.environ["RANK"]), world_size=int(os.environ["WORLD_SIZE"])) ``` This works for both map-style datasets and iterable datasets. The dataset is split for the node at rank `rank` in a pool of nodes of size `world_size`. For map-style datasets: Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset. For iterable datasets: If the dataset has a number of shards that is a multiple of `world_size` (i.e. if `dataset.n_shards % world_size == 0`), then the shards are evenly assigned across the nodes, which is the most optimized. Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples. This can also be combined with a `torch.utils.data.DataLoader` if you want each node to use multiple workers to load the data. This also supports shuffling. At each epoch, the iterable dataset shards are reshuffled across all the nodes - you just have to call `iterable_ds.set_epoch(epoch_number)`. TODO: - [x] docs for usage in PyTorch - [x] unit tests - [x] integration tests with torch.distributed.launch Related to https://github.com/huggingface/transformers/issues/20770 Close https://github.com/huggingface/datasets/issues/5369
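A hedged end-to-end sketch combining the pieces described above; the dataset name is illustrative and `RANK`/`WORLD_SIZE` are assumed to be set by the launcher (e.g. `torchrun`):

```python
import os
from torch.utils.data import DataLoader
from datasets import load_dataset
from datasets.distributed import split_dataset_by_node

rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])

ds = load_dataset("c4", "en", split="train", streaming=True)   # an iterable dataset
ds = ds.shuffle(seed=42, buffer_size=10_000)
ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)

dataloader = DataLoader(ds, batch_size=8, num_workers=4)        # per-node loading workers

for epoch in range(3):
    ds.set_epoch(epoch)  # reshuffle the iterable dataset shards across nodes each epoch
    for batch in dataloader:
        ...
```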
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5369/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5369/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5369", "html_url": "https://github.com/huggingface/datasets/pull/5369", "diff_url": "https://github.com/huggingface/datasets/pull/5369.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5369.patch", "merged_at": "2023-01-16T13:33:32" }
true
https://api.github.com/repos/huggingface/datasets/issues/5368
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5368/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5368/comments
https://api.github.com/repos/huggingface/datasets/issues/5368/events
https://github.com/huggingface/datasets/pull/5368
1,500,322,973
PR_kwDODunzps5FpZyx
5,368
Align remove columns behavior and input dict mutation in `map` with previous behavior
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-16T14:28:47"
"2022-12-16T16:28:08"
"2022-12-16T16:25:12"
CONTRIBUTOR
null
Align the `remove_columns` behavior and input dict mutation in `map` with the behavior before https://github.com/huggingface/datasets/pull/5252.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5368/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5368/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5368", "html_url": "https://github.com/huggingface/datasets/pull/5368", "diff_url": "https://github.com/huggingface/datasets/pull/5368.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5368.patch", "merged_at": "2022-12-16T16:25:12" }
true
https://api.github.com/repos/huggingface/datasets/issues/5367
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5367/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5367/comments
https://api.github.com/repos/huggingface/datasets/issues/5367/events
https://github.com/huggingface/datasets/pull/5367
1,499,174,749
PR_kwDODunzps5FlevK
5,367
Fix remove columns from lazy dict
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-15T22:04:12"
"2022-12-15T22:27:53"
"2022-12-15T22:24:50"
MEMBER
null
This was introduced in https://github.com/huggingface/datasets/pull/5252 and causing the transformers CI to break: https://app.circleci.com/pipelines/github/huggingface/transformers/53886/workflows/522faf2e-a053-454c-94f8-a617fde33393/jobs/648597 Basically this code should return a dataset with only one column: ```python from datasets import * ds = Dataset.from_dict({"a": range(5)}) def f(x): x["b"] = x["a"] return x ds = ds.map(f, remove_columns=["a"]) assert ds.column_names == ["b"] ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5367/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5367/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5367", "html_url": "https://github.com/huggingface/datasets/pull/5367", "diff_url": "https://github.com/huggingface/datasets/pull/5367.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5367.patch", "merged_at": "2022-12-15T22:24:50" }
true
https://api.github.com/repos/huggingface/datasets/issues/5366
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5366/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5366/comments
https://api.github.com/repos/huggingface/datasets/issues/5366/events
https://github.com/huggingface/datasets/pull/5366
1,498,530,851
PR_kwDODunzps5FjSFl
5,366
ExamplesIterable fixes
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-15T14:23:05"
"2022-12-15T14:44:47"
"2022-12-15T14:41:45"
MEMBER
null
Fix typing and `ExamplesIterable.shard_data_sources`
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5366/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5366/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5366", "html_url": "https://github.com/huggingface/datasets/pull/5366", "diff_url": "https://github.com/huggingface/datasets/pull/5366.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5366.patch", "merged_at": "2022-12-15T14:41:45" }
true
https://api.github.com/repos/huggingface/datasets/issues/5365
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5365/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5365/comments
https://api.github.com/repos/huggingface/datasets/issues/5365/events
https://github.com/huggingface/datasets/pull/5365
1,498,422,466
PR_kwDODunzps5Fi6ZD
5,365
fix: image array should support other formats than uint8
{ "login": "vigsterkr", "id": 30353, "node_id": "MDQ6VXNlcjMwMzUz", "avatar_url": "https://avatars.githubusercontent.com/u/30353?v=4", "gravatar_id": "", "url": "https://api.github.com/users/vigsterkr", "html_url": "https://github.com/vigsterkr", "followers_url": "https://api.github.com/users/vigsterkr/followers", "following_url": "https://api.github.com/users/vigsterkr/following{/other_user}", "gists_url": "https://api.github.com/users/vigsterkr/gists{/gist_id}", "starred_url": "https://api.github.com/users/vigsterkr/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/vigsterkr/subscriptions", "organizations_url": "https://api.github.com/users/vigsterkr/orgs", "repos_url": "https://api.github.com/users/vigsterkr/repos", "events_url": "https://api.github.com/users/vigsterkr/events{/privacy}", "received_events_url": "https://api.github.com/users/vigsterkr/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-15T13:17:50"
"2023-01-26T18:46:45"
"2023-01-26T18:39:36"
CONTRIBUTOR
null
Currently, images that are provided as ndarrays but are not in `uint8` format are going to lose data. Namely, for example in a depth image where the data is in float32 format, the type-casting to uint8 will basically make the whole image blank. `PIL.Image.fromarray` [does support mode `F`](https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes), although maybe some further metadata could be supplied via the [Image](https://huggingface.co/docs/datasets/v2.7.1/en/package_reference/main_classes#datasets.Image) object.
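A minimal sketch of the case described above, assuming a float32 depth map (shape and values are illustrative):

```python
import numpy as np
from PIL import Image

depth = np.random.rand(480, 640).astype(np.float32)  # e.g. depth in meters, not representable in uint8
img = Image.fromarray(depth)                          # PIL picks mode "F" (32-bit float), no lossy uint8 cast
assert img.mode == "F"
```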
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5365/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5365/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5365", "html_url": "https://github.com/huggingface/datasets/pull/5365", "diff_url": "https://github.com/huggingface/datasets/pull/5365.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5365.patch", "merged_at": "2023-01-26T18:39:36" }
true
https://api.github.com/repos/huggingface/datasets/issues/5363
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5363/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5363/comments
https://api.github.com/repos/huggingface/datasets/issues/5363/events
https://github.com/huggingface/datasets/issues/5363
1,498,171,317
I_kwDODunzps5ZTEe1
5,363
Dataset.from_generator() crashes on simple example
{ "login": "villmow", "id": 2743060, "node_id": "MDQ6VXNlcjI3NDMwNjA=", "avatar_url": "https://avatars.githubusercontent.com/u/2743060?v=4", "gravatar_id": "", "url": "https://api.github.com/users/villmow", "html_url": "https://github.com/villmow", "followers_url": "https://api.github.com/users/villmow/followers", "following_url": "https://api.github.com/users/villmow/following{/other_user}", "gists_url": "https://api.github.com/users/villmow/gists{/gist_id}", "starred_url": "https://api.github.com/users/villmow/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/villmow/subscriptions", "organizations_url": "https://api.github.com/users/villmow/orgs", "repos_url": "https://api.github.com/users/villmow/repos", "events_url": "https://api.github.com/users/villmow/events{/privacy}", "received_events_url": "https://api.github.com/users/villmow/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-15T10:21:28"
"2022-12-15T11:51:33"
"2022-12-15T11:51:33"
NONE
null
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5363/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5363/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5362
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5362/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5362/comments
https://api.github.com/repos/huggingface/datasets/issues/5362/events
https://github.com/huggingface/datasets/issues/5362
1,497,643,744
I_kwDODunzps5ZRDrg
5,362
Run 'GPT-J' failure due to download dataset fail (' ConnectionError: Couldn't reach http://eaidata.bmk.sh/data/enron_emails.jsonl.zst ' )
{ "login": "shaoyuta", "id": 52023469, "node_id": "MDQ6VXNlcjUyMDIzNDY5", "avatar_url": "https://avatars.githubusercontent.com/u/52023469?v=4", "gravatar_id": "", "url": "https://api.github.com/users/shaoyuta", "html_url": "https://github.com/shaoyuta", "followers_url": "https://api.github.com/users/shaoyuta/followers", "following_url": "https://api.github.com/users/shaoyuta/following{/other_user}", "gists_url": "https://api.github.com/users/shaoyuta/gists{/gist_id}", "starred_url": "https://api.github.com/users/shaoyuta/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/shaoyuta/subscriptions", "organizations_url": "https://api.github.com/users/shaoyuta/orgs", "repos_url": "https://api.github.com/users/shaoyuta/repos", "events_url": "https://api.github.com/users/shaoyuta/events{/privacy}", "received_events_url": "https://api.github.com/users/shaoyuta/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for reporting, @shaoyuta.\r\n\r\nWe have checked and yes, apparently there is an issue with the server hosting the data of the \"enron_emails\" subset of \"the_pile\" dataset: http://eaidata.bmk.sh/data/enron_emails.jsonl.zst\r\nIt seems to be down: The connection has timed out.\r\n\r\nPlease note that at the Hugging Face Hub, we are not hosting their data for this dataset, but only a script that downloads the data from their servers. We are updating the data URL to one in another server.\r\n\r\nIn the meantime, please note that you can train your model in the entire \"the_pile\" dataset, by passing the \"all\" config (instead of the \"enron_emails\" one).", "We have transferred this issue to the corresponding dataset Community tab: https://huggingface.co/datasets/the_pile/discussions/2\r\n\r\nPlease, follow the updates there." ]
"2022-12-15T01:23:03"
"2022-12-15T07:45:54"
"2022-12-15T07:45:53"
NONE
null
### Describe the bug Running model "GPT-J" with dataset "the_pile" fails. The failure output is as below: ![image](https://user-images.githubusercontent.com/52023469/207750127-118d9896-35f4-4ee9-90d4-d0ab9aae9c74.png) It looks like this is due to "http://eaidata.bmk.sh/data/enron_emails.jsonl.zst" being unreachable. ### Steps to reproduce the bug Steps to reproduce this issue: git clone https://github.com/huggingface/transformers cd transformers python examples/pytorch/language-modeling/run_clm.py --model_name_or_path EleutherAI/gpt-j-6B --dataset_name the_pile --dataset_config_name enron_emails --do_eval --output_dir /tmp/output --overwrite_output_dir ### Expected behavior This issue looks like it is due to "http://eaidata.bmk.sh/data/enron_emails.jsonl.zst" not being reachable. Is there another way to download the dataset "the_pile"? Is there a way to cache the dataset "the_pile" so that it does not have to be downloaded at runtime? ### Environment info huggingface_hub version: 0.11.1 Platform: Linux-5.15.0-52-generic-x86_64-with-glibc2.35 Python version: 3.9.12 Running in iPython ?: No Running in notebook ?: No Running in Google Colab ?: No Token path ?: /home/taosy/.huggingface/token Has saved token ?: False Configured git credential helpers: FastAI: N/A Tensorflow: N/A Torch: N/A Jinja2: N/A Graphviz: N/A Pydot: N/A
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5362/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5362/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5360
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5360/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5360/comments
https://api.github.com/repos/huggingface/datasets/issues/5360/events
https://github.com/huggingface/datasets/issues/5360
1,496,947,177
I_kwDODunzps5ZOZnp
5,360
IterableDataset returns duplicated data using PyTorch DDP
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
[ "If you use huggingface trainer, you will find the trainer has wrapped a `IterableDatasetShard` to avoid duplication.\r\nSee:\r\nhttps://github.com/huggingface/transformers/blob/dfd818420dcbad68e05a502495cf666d338b2bfb/src/transformers/trainer.py#L835\r\n", "If you want to support it by datasets natively, maybe we also need to change the code in `transformers` ?", "Opened https://github.com/huggingface/transformers/issues/20770 to discuss this :)", "Maybe something like this then ?\r\n```python\r\nfrom datasets.distributed import split_dataset_by_node\r\nds = split_dataset_by_node(ds, rank=rank, world_size=world_size)\r\n```\r\n\r\nFor map-style datasets the implementation is trivial (it can simply use `.shard()`).\r\n\r\nFor iterable datasets we would need to implement a new ExamplesIterable that would only iterate on a subset of the (possibly shuffled and re-shuffled after each epoch) list of shards, based on the rank and world size.", "My plan is to skip examples by default to not end up with duplicates.\r\n\r\nAnd if a dataset has a number of shards that is a factor of the world size, then I'd make it more optimized by distributing the shards evenly across nodes instead.", "Opened a PR here: https://github.com/huggingface/datasets/pull/5369\r\n\r\nfeel free to play with it and share your feedbacks :)" ]
"2022-12-14T16:06:19"
"2023-01-16T13:33:33"
"2023-01-16T13:33:33"
MEMBER
null
As mentioned in https://github.com/huggingface/datasets/issues/3423, when using PyTorch DDP the dataset ends up with duplicated data. We already check for the PyTorch `worker_info` for single node, but we should also check for `torch.distributed.get_world_size()` and `torch.distributed.get_rank()`
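A minimal sketch of the fix discussed in the comments above, assuming `datasets>=2.8.0` where `split_dataset_by_node` landed (PR #5369) and a script launched with `torchrun` so the process group can be initialized:

```python
import torch.distributed as dist
from torch.utils.data import DataLoader
from datasets import load_dataset
from datasets.distributed import split_dataset_by_node

dist.init_process_group(backend="nccl")  # rank/world size come from torchrun
rank, world_size = dist.get_rank(), dist.get_world_size()

ds = load_dataset("c4", "en", split="train", streaming=True)
# Each process keeps a disjoint part of the stream, so DDP ranks no longer
# iterate over duplicated examples.
ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)

dataloader = DataLoader(ds, batch_size=32)
```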
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5360/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5360/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5359
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5359/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5359/comments
https://api.github.com/repos/huggingface/datasets/issues/5359/events
https://github.com/huggingface/datasets/pull/5359
1,495,297,857
PR_kwDODunzps5FYHWm
5,359
Raise error if ClassLabel names is not python list
{ "login": "freddyheppell", "id": 1475568, "node_id": "MDQ6VXNlcjE0NzU1Njg=", "avatar_url": "https://avatars.githubusercontent.com/u/1475568?v=4", "gravatar_id": "", "url": "https://api.github.com/users/freddyheppell", "html_url": "https://github.com/freddyheppell", "followers_url": "https://api.github.com/users/freddyheppell/followers", "following_url": "https://api.github.com/users/freddyheppell/following{/other_user}", "gists_url": "https://api.github.com/users/freddyheppell/gists{/gist_id}", "starred_url": "https://api.github.com/users/freddyheppell/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/freddyheppell/subscriptions", "organizations_url": "https://api.github.com/users/freddyheppell/orgs", "repos_url": "https://api.github.com/users/freddyheppell/repos", "events_url": "https://api.github.com/users/freddyheppell/events{/privacy}", "received_events_url": "https://api.github.com/users/freddyheppell/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-13T23:04:06"
"2022-12-22T16:35:49"
"2022-12-22T16:32:49"
CONTRIBUTOR
null
Checks the type of the `names` argument provided to `ClassLabel` to avoid easy-to-make but hard-to-debug errors (closes #5332 - see that issue for the discussion)
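For illustration only (the exact error type and the original misuse reported in #5332 are not reproduced here), the kind of mistake the type check is meant to catch early might look like this:

```python
from datasets import ClassLabel, Features, Value

# Correct usage: `names` is a plain Python list of label strings.
features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})

# The kind of call this PR rejects up front instead of failing later with a
# confusing downstream error, e.g. passing a single string instead of a list:
# ClassLabel(names="neg pos")  # now raises immediately
```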
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5359/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5359/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5359", "html_url": "https://github.com/huggingface/datasets/pull/5359", "diff_url": "https://github.com/huggingface/datasets/pull/5359.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5359.patch", "merged_at": "2022-12-22T16:32:49" }
true
https://api.github.com/repos/huggingface/datasets/issues/5358
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5358/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5358/comments
https://api.github.com/repos/huggingface/datasets/issues/5358/events
https://github.com/huggingface/datasets/pull/5358
1,495,270,822
PR_kwDODunzps5FYBcq
5,358
Fix `fs.open` resource leaks
{ "login": "tkukurin", "id": 297847, "node_id": "MDQ6VXNlcjI5Nzg0Nw==", "avatar_url": "https://avatars.githubusercontent.com/u/297847?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tkukurin", "html_url": "https://github.com/tkukurin", "followers_url": "https://api.github.com/users/tkukurin/followers", "following_url": "https://api.github.com/users/tkukurin/following{/other_user}", "gists_url": "https://api.github.com/users/tkukurin/gists{/gist_id}", "starred_url": "https://api.github.com/users/tkukurin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tkukurin/subscriptions", "organizations_url": "https://api.github.com/users/tkukurin/orgs", "repos_url": "https://api.github.com/users/tkukurin/repos", "events_url": "https://api.github.com/users/tkukurin/events{/privacy}", "received_events_url": "https://api.github.com/users/tkukurin/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-13T22:35:51"
"2023-01-05T16:46:31"
"2023-01-05T15:59:51"
CONTRIBUTOR
null
Invoking `{load,save}_from_dict` results in resource leak warnings; this should fix them. Introduces no significant logic changes.
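The general pattern behind this kind of fix, sketched with a local `fsspec` filesystem (the file name and payload are placeholders, not the code touched by this PR):

```python
import fsspec

fs = fsspec.filesystem("file")

# Before: something like `f = fs.open(path, "w"); f.write(...)` left the handle
# open, which shows up as ResourceWarning messages in the test suite.
# After: a context manager closes the handle deterministically.
with fs.open("dataset_state.json", "w") as f:
    f.write("{}")
```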
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5358/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5358/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5358", "html_url": "https://github.com/huggingface/datasets/pull/5358", "diff_url": "https://github.com/huggingface/datasets/pull/5358.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5358.patch", "merged_at": "2023-01-05T15:59:51" }
true
https://api.github.com/repos/huggingface/datasets/issues/5357
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5357/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5357/comments
https://api.github.com/repos/huggingface/datasets/issues/5357/events
https://github.com/huggingface/datasets/pull/5357
1,495,029,602
PR_kwDODunzps5FXNyR
5,357
Support torch dataloader without torch formatting
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-13T19:39:24"
"2023-01-04T12:45:40"
"2022-12-15T19:15:54"
MEMBER
null
In https://github.com/huggingface/datasets/pull/5084 we make the torch formatting consistent with the map-style datasets formatting: a torch formatted iterable dataset will yield torch tensors. The previous behavior of the torch formatting for iterable datasets was simply to make the iterable dataset inherit from `torch.utils.data.Dataset` to make it work in a torch DataLoader. However, ideally an unformatted dataset should also work with a DataLoader. To fix that, `datasets.IterableDataset` should inherit from `torch.utils.data.IterableDataset`. Since we don't want to import torch on startup, I created this PR to dynamically make the `datasets.IterableDataset` class inherit from the torch one when a `datasets.IterableDataset` is instantiated and if PyTorch is available. ```python >>> from datasets import load_dataset >>> ds = load_dataset("c4", "en", streaming=True, split="train") >>> import torch.utils.data >>> isinstance(ds, torch.utils.data.IterableDataset) True >>> dataloader = torch.utils.data.DataLoader(ds, batch_size=32, num_workers=4) >>> for example in dataloader: ...: ... ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5357/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5357/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5357", "html_url": "https://github.com/huggingface/datasets/pull/5357", "diff_url": "https://github.com/huggingface/datasets/pull/5357.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5357.patch", "merged_at": "2022-12-15T19:15:54" }
true
https://api.github.com/repos/huggingface/datasets/issues/5356
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5356/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5356/comments
https://api.github.com/repos/huggingface/datasets/issues/5356/events
https://github.com/huggingface/datasets/pull/5356
1,494,961,609
PR_kwDODunzps5FW-c9
5,356
Clean filesystem and logging docstrings
{ "login": "stevhliu", "id": 59462357, "node_id": "MDQ6VXNlcjU5NDYyMzU3", "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stevhliu", "html_url": "https://github.com/stevhliu", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "repos_url": "https://api.github.com/users/stevhliu/repos", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-13T18:54:09"
"2022-12-14T17:25:58"
"2022-12-14T17:22:16"
MEMBER
null
This PR cleans the `Filesystems` and `Logging` docstrings.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5356/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5356/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5356", "html_url": "https://github.com/huggingface/datasets/pull/5356", "diff_url": "https://github.com/huggingface/datasets/pull/5356.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5356.patch", "merged_at": "2022-12-14T17:22:16" }
true
https://api.github.com/repos/huggingface/datasets/issues/5355
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5355/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5355/comments
https://api.github.com/repos/huggingface/datasets/issues/5355/events
https://github.com/huggingface/datasets/pull/5355
1,493,076,860
PR_kwDODunzps5FQcYG
5,355
Clean up Table class docstrings
{ "login": "stevhliu", "id": 59462357, "node_id": "MDQ6VXNlcjU5NDYyMzU3", "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stevhliu", "html_url": "https://github.com/stevhliu", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "repos_url": "https://api.github.com/users/stevhliu/repos", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-13T00:29:47"
"2022-12-13T18:17:56"
"2022-12-13T18:14:42"
MEMBER
null
This PR cleans up the `Table` class docstrings :)
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5355/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5355/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5355", "html_url": "https://github.com/huggingface/datasets/pull/5355", "diff_url": "https://github.com/huggingface/datasets/pull/5355.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5355.patch", "merged_at": "2022-12-13T18:14:42" }
true
https://api.github.com/repos/huggingface/datasets/issues/5353
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5353/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5353/comments
https://api.github.com/repos/huggingface/datasets/issues/5353/events
https://github.com/huggingface/datasets/issues/5353
1,491,880,500
I_kwDODunzps5Y7Eo0
5,353
Support remote file systems for `Audio`
{ "login": "OllieBroadhurst", "id": 46894149, "node_id": "MDQ6VXNlcjQ2ODk0MTQ5", "avatar_url": "https://avatars.githubusercontent.com/u/46894149?v=4", "gravatar_id": "", "url": "https://api.github.com/users/OllieBroadhurst", "html_url": "https://github.com/OllieBroadhurst", "followers_url": "https://api.github.com/users/OllieBroadhurst/followers", "following_url": "https://api.github.com/users/OllieBroadhurst/following{/other_user}", "gists_url": "https://api.github.com/users/OllieBroadhurst/gists{/gist_id}", "starred_url": "https://api.github.com/users/OllieBroadhurst/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/OllieBroadhurst/subscriptions", "organizations_url": "https://api.github.com/users/OllieBroadhurst/orgs", "repos_url": "https://api.github.com/users/OllieBroadhurst/repos", "events_url": "https://api.github.com/users/OllieBroadhurst/events{/privacy}", "received_events_url": "https://api.github.com/users/OllieBroadhurst/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
null
[]
null
[ "Just seen https://github.com/huggingface/datasets/issues/5281" ]
"2022-12-12T13:22:13"
"2022-12-12T13:37:14"
"2022-12-12T13:37:14"
NONE
null
### Feature request Hi there! It would be super cool if `Audio()`, and potentially other features, could read files from a remote file system. ### Motivation Large amounts of data are often stored in buckets. `load_from_disk` is able to retrieve data from cloud storage but to my knowledge actually copies the datasets across first, so if you're working off a system with smaller disk specs (like a VM), you can run out of space very quickly. ### Your contribution Something like this (for Google Cloud Platform in this instance): ```python from datasets import Dataset, Audio import gcsfs fs = gcsfs.GCSFileSystem() list_of_audio_fp = {'audio': ['1', '2', '3']} ds = Dataset.from_dict(list_of_audio_fp) ds = ds.cast_column("audio", Audio(sampling_rate=16000, fs=fs)) ``` Under the hood: ```python import librosa from io import BytesIO def load_audio(fp, sampling_rate=None, fs=None): if fs is not None: with fs.open(fp, 'rb') as f: arr, sr = librosa.load(BytesIO(f.read()), sr=sampling_rate) else: # Perform existing io operations ``` Written from memory so some things could be wrong.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5353/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5353/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5351
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5351/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5351/comments
https://api.github.com/repos/huggingface/datasets/issues/5351/events
https://github.com/huggingface/datasets/issues/5351
1,490,659,504
I_kwDODunzps5Y2aiw
5,351
Do we need to implement `_prepare_split`?
{ "login": "jmwoloso", "id": 7530947, "node_id": "MDQ6VXNlcjc1MzA5NDc=", "avatar_url": "https://avatars.githubusercontent.com/u/7530947?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jmwoloso", "html_url": "https://github.com/jmwoloso", "followers_url": "https://api.github.com/users/jmwoloso/followers", "following_url": "https://api.github.com/users/jmwoloso/following{/other_user}", "gists_url": "https://api.github.com/users/jmwoloso/gists{/gist_id}", "starred_url": "https://api.github.com/users/jmwoloso/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmwoloso/subscriptions", "organizations_url": "https://api.github.com/users/jmwoloso/orgs", "repos_url": "https://api.github.com/users/jmwoloso/repos", "events_url": "https://api.github.com/users/jmwoloso/events{/privacy}", "received_events_url": "https://api.github.com/users/jmwoloso/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi! `DatasetBuilder` is a parent class for concrete builders: `GeneratorBasedBuilder`, `ArrowBasedBuilder` and `BeamBasedBuilder`. When writing a builder script, these classes are the ones you should inherit from. And since all of them implement `_prepare_split`, you only have to implement the three methods mentioned above.", "Thanks so much @mariosasko for the fast response! I've been referencing [this page in the docs](https://huggingface.co/docs/datasets/v2.4.0/en/about_dataset_load) because it it pretty comprehensive in terms of what we have to do and I figured since we subclass the `BuilderConfig` the same pattern would hold, but I've also seen the page with those sub-classed builders as well, so that fills in a knowledge gap for me.", "cc @stevhliu who may have some ideas on how to improve this part of the docs.", "one more question for my understanding @mariosasko. the requirement of a loading script has always seemed counterintuitive to me. if i have to provide a script with every dataset, what is the point of using `datasets` if we're doing all the work of loading it, I can just do that in my code and skip the datasets integration (this of course discounts other potential benefits around metadata management, etc., my example is just simplest use case though for the sake of discussion).\r\n\r\nso i figured I would implement my own `BuilderConfig` and `DatasetBuilder` to handle that portion of it and not have to make a script. i _thought_ this would result in `datasets` (via `download_and_prepare`) then making me something that I could load using `load_dataset` moving forward.\r\n\r\nConcretely, i envisioned this pattern being possible:\r\n\r\n ```\r\nclass MyBuilderConfig(BuilderConfig):\r\n def __init__(self, name=\"my_named_dataset\", ...):\r\n super().__init__(name, ...)\r\n\r\nclass MyDatasetBuilder(GeneratorBasedBuilder):\r\n BUILDER_CONFIG_CLASS = MyBuilderConfig\r\n ....\r\n\r\nmy_builder = MyDatasetBuilder(...)\r\n\r\n# this doesn't exactly work like I thought; I don't get a dataset back, but NoneType instead\r\n# though I can see it loading the files and it generates the cache, etc.\r\nmy_dataset = my_builder.download_and_prepare()\r\n\r\n# load the dataset in the future by referencing it by name and loading from the cached arrow version\r\nnew_instance_of_my_dataset = load_dataset(\"my_named_dataset\")\r\n```\r\n\r\nI've seen references to the `save_to_disk` method which might be the next step I need in order to load it by name, in which case, that makes sense, then i just need to debug why `download_and_prepare` isn't returning me a dataset, but I feel like I still have a larger conceptual knowledge gap on how to use the library correctly.\r\n\r\nThanks again in advance!", "> the requirement of a loading script has always seemed counterintuitive to me\r\n\r\nThis is a requirement only for datasets not stored in standard formats such as CSV, JSON, SQL, Parquet, ImageFolder, etc. \r\n\r\n> if i have to provide a script with every dataset, what is the point of using datasets if we're doing all the work of loading it, I can just do that in my code and skip the datasets integration (this of course discounts other potential benefits around metadata management, etc., my example is just simplest use case though for the sake of discussion)\r\n\r\nOur README/documentation lists the main features... 
\r\n\r\nOne of the main ones is that our library makes it easy to work with datasets larger than RAM (thanks to Arrow and the caching mechanism), and this is not trivial to implement.\r\n\r\nRegarding the step-by-step builder, this is the pattern:\r\n```python\r\nfrom datasets import load_dataset_builder\r\nbuilder = load_dataset_builder(\"path/to/script\") # or direct instantiation with MyDatasetBuilder(...)\r\nbuilder.download_and_prepare()\r\ndset = builder.as_dataset()\r\n```", "ok, that makes sense. thank you @mariosasko. I realized i'd never looked on the hub at any of the files associated with any datasets. just did that now and it appears that i'll need to have a script regardless _but_ that will just contain my custom config and builder classes, so without realizing it I was already making my script, I just need to wrap that in a file that sits alongside my data (I looked at Glue and realized I was already doing what I thought didn't make sense to have to do, lol).\r\n\r\n`download_and_prepare` isn't returning me a dataset though, but I'll look into that and open another issue if I can't figure it out.", "`download_and_prepare` downloads and prepares the arrow files. You need to call `as_dataset` on the builder to get the dataset.", "ok, I think I was assigning the output of `builder.download_and_prepare` but it's an inplace op, so that explains the `NoneType` i was getting back. Now I'm getting:\r\n\r\n```\r\nArrowInvalid Traceback (most recent call last)\r\n<ipython-input-7-3ed50fb87c70> in <module>\r\n----> 1 ds = dataset_builder.as_dataset()\r\n\r\n/databricks/python/lib/python3.8/site-packages/datasets/builder.py in as_dataset(self, split, run_post_process, ignore_verifications, in_memory)\r\n 1020 \r\n 1021 # Create a dataset for each of the given splits\r\n-> 1022 datasets = map_nested(\r\n 1023 partial(\r\n 1024 self._build_single_dataset,\r\n\r\n/databricks/python/lib/python3.8/site-packages/datasets/utils/py_utils.py in map_nested(function, data_struct, dict_only, map_list, map_tuple, map_numpy, num_proc, parallel_min_length, types, disable_tqdm, desc)\r\n 442 num_proc = 1\r\n 443 if num_proc <= 1 or len(iterable) < parallel_min_length:\r\n--> 444 mapped = [\r\n 445 _single_map_nested((function, obj, types, None, True, None))\r\n 446 for obj in logging.tqdm(iterable, disable=disable_tqdm, desc=desc)\r\n\r\n/databricks/python/lib/python3.8/site-packages/datasets/utils/py_utils.py in <listcomp>(.0)\r\n 443 if num_proc <= 1 or len(iterable) < parallel_min_length:\r\n 444 mapped = [\r\n--> 445 _single_map_nested((function, obj, types, None, True, None))\r\n 446 for obj in logging.tqdm(iterable, disable=disable_tqdm, desc=desc)\r\n 447 ]\r\n\r\n/databricks/python/lib/python3.8/site-packages/datasets/utils/py_utils.py in _single_map_nested(args)\r\n 344 # Singleton first to spare some computation\r\n 345 if not isinstance(data_struct, dict) and not isinstance(data_struct, types):\r\n--> 346 return function(data_struct)\r\n 347 \r\n 348 # Reduce logging to keep things readable in multiprocessing with tqdm\r\n\r\n/databricks/python/lib/python3.8/site-packages/datasets/builder.py in _build_single_dataset(self, split, run_post_process, ignore_verifications, in_memory)\r\n 1051 \r\n 1052 # Build base dataset\r\n-> 1053 ds = self._as_dataset(\r\n 1054 split=split,\r\n 1055 in_memory=in_memory,\r\n\r\n/databricks/python/lib/python3.8/site-packages/datasets/builder.py in _as_dataset(self, split, in_memory)\r\n 1120 \"\"\"\r\n 1121 cache_dir = 
self._fs._strip_protocol(self._output_dir)\r\n-> 1122 dataset_kwargs = ArrowReader(cache_dir, self.info).read(\r\n 1123 name=self.name,\r\n 1124 instructions=split,\r\n\r\n/databricks/python/lib/python3.8/site-packages/datasets/arrow_reader.py in read(self, name, instructions, split_infos, in_memory)\r\n 236 msg = f'Instruction \"{instructions}\" corresponds to no data!'\r\n 237 raise ValueError(msg)\r\n--> 238 return self.read_files(files=files, original_instructions=instructions, in_memory=in_memory)\r\n 239 \r\n 240 def read_files(\r\n\r\n/databricks/python/lib/python3.8/site-packages/datasets/arrow_reader.py in read_files(self, files, original_instructions, in_memory)\r\n 257 \"\"\"\r\n 258 # Prepend path to filename\r\n--> 259 pa_table = self._read_files(files, in_memory=in_memory)\r\n 260 # If original_instructions is not None, convert it to a human-readable NamedSplit\r\n 261 if original_instructions is not None:\r\n\r\n/databricks/python/lib/python3.8/site-packages/datasets/arrow_reader.py in _read_files(self, files, in_memory)\r\n 192 f[\"filename\"] = os.path.join(self._path, f[\"filename\"])\r\n 193 for f_dict in files:\r\n--> 194 pa_table: Table = self._get_table_from_filename(f_dict, in_memory=in_memory)\r\n 195 pa_tables.append(pa_table)\r\n 196 pa_tables = [t for t in pa_tables if len(t) > 0]\r\n\r\n/databricks/python/lib/python3.8/site-packages/datasets/arrow_reader.py in _get_table_from_filename(self, filename_skip_take, in_memory)\r\n 327 filename_skip_take[\"take\"] if \"take\" in filename_skip_take else None,\r\n 328 )\r\n--> 329 table = ArrowReader.read_table(filename, in_memory=in_memory)\r\n 330 if take == -1:\r\n 331 take = len(table) - skip\r\n\r\n/databricks/python/lib/python3.8/site-packages/datasets/arrow_reader.py in read_table(filename, in_memory)\r\n 348 \"\"\"\r\n 349 table_cls = InMemoryTable if in_memory else MemoryMappedTable\r\n--> 350 return table_cls.from_file(filename)\r\n 351 \r\n 352 \r\n\r\n/databricks/python/lib/python3.8/site-packages/datasets/table.py in from_file(cls, filename, replays)\r\n 1034 @classmethod\r\n 1035 def from_file(cls, filename: str, replays=None):\r\n-> 1036 table = _memory_mapped_arrow_table_from_file(filename)\r\n 1037 table = cls._apply_replays(table, replays)\r\n 1038 return cls(table, filename, replays)\r\n\r\n/databricks/python/lib/python3.8/site-packages/datasets/table.py in _memory_mapped_arrow_table_from_file(filename)\r\n 48 def _memory_mapped_arrow_table_from_file(filename: str) -> pa.Table:\r\n 49 memory_mapped_stream = pa.memory_map(filename)\r\n---> 50 opened_stream = pa.ipc.open_stream(memory_mapped_stream)\r\n 51 pa_table = opened_stream.read_all()\r\n 52 return pa_table\r\n\r\n/databricks/python/lib/python3.8/site-packages/pyarrow/ipc.py in open_stream(source)\r\n 152 reader : RecordBatchStreamReader\r\n 153 \"\"\"\r\n--> 154 return RecordBatchStreamReader(source)\r\n 155 \r\n 156 \r\n\r\n/databricks/python/lib/python3.8/site-packages/pyarrow/ipc.py in __init__(self, source)\r\n 43 \r\n 44 def __init__(self, source):\r\n---> 45 self._open(source)\r\n 46 \r\n 47 \r\n\r\n/databricks/python/lib/python3.8/site-packages/pyarrow/ipc.pxi in pyarrow.lib._RecordBatchStreamReader._open()\r\n\r\n/databricks/python/lib/python3.8/site-packages/pyarrow/error.pxi in pyarrow.lib.pyarrow_internal_check_status()\r\n\r\n/databricks/python/lib/python3.8/site-packages/pyarrow/error.pxi in pyarrow.lib.check_status()\r\n\r\nArrowInvalid: Tried reading schema message, was null or length 0\r\n```\r\n\r\n", "looks like my arrow files 
are all empty @mariosasko \r\n\r\n![image](https://user-images.githubusercontent.com/7530947/208179977-9ae62c9a-866c-472b-9a09-25d1191188fb.png)\r\n\r\n\r\ni also see the `incomplete_info.lock` file a level up too. seems like the data isn't being persisted to disk when I call `download_and_prepare`. is there something else i need to do before then, perhaps?", "quick update @mariosasko. i got it working! i had to downgrade to `datasets==2.4.0`. testing other versions now and will let you know the results.", "I've tested with every version of `datasets>2.4.0` and i get the same error with all of them." ]
"2022-12-12T01:38:54"
"2022-12-20T18:20:57"
"2022-12-12T16:48:56"
NONE
null
### Describe the bug I'm not sure this is a bug or if it's just missing in the documentation, or i'm not doing something correctly, but I'm subclassing `DatasetBuilder` and getting the following error because on the `DatasetBuilder` class the `_prepare_split` method is abstract (as are the others we are required to implement, hence the genesis of my question): ``` Traceback (most recent call last): File "/home/jason/source/python/prism_machine_learning/examples/create_hf_datasets.py", line 28, in <module> dataset_builder.download_and_prepare() File "/home/jason/.virtualenvs/pml/lib/python3.8/site-packages/datasets/builder.py", line 704, in download_and_prepare self._download_and_prepare( File "/home/jason/.virtualenvs/pml/lib/python3.8/site-packages/datasets/builder.py", line 793, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/home/jason/.virtualenvs/pml/lib/python3.8/site-packages/datasets/builder.py", line 1124, in _prepare_split raise NotImplementedError() NotImplementedError ``` ### Steps to reproduce the bug I will share implementation if it turns out that everything should be working (i.e. we only need to implement those 3 methods the docs mention), but I don't want to distract from the original question. ### Expected behavior I just need to know if there are additional methods we need to implement when subclassing `DatasetBuilder` besides what the documentation specifies -> `_info`, `_split_generators` and `_generate_examples` ### Environment info - `datasets` version: 2.4.0 - Platform: Linux-5.4.0-135-generic-x86_64-with-glibc2.2.5 - Python version: 3.8.12 - PyArrow version: 7.0.0 - Pandas version: 1.4.1
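As the comment thread above resolves, `_prepare_split` only has to be implemented when inheriting from the abstract `DatasetBuilder` directly; inheriting from one of the concrete builders avoids the error. A minimal, hypothetical skeleton (the dataset name, features, and file format are made up):

```python
import datasets


class MyDataset(datasets.GeneratorBasedBuilder):
    """Skeleton builder: only the three documented methods are implemented;
    `_prepare_split` is inherited from `GeneratorBasedBuilder`."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {"text": datasets.Value("string"), "label": datasets.ClassLabel(names=["neg", "pos"])}
            )
        )

    def _split_generators(self, dl_manager):
        # "my_data.tsv" is a placeholder for whatever the builder downloads or ships.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": "my_data.tsv"},
            )
        ]

    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                text, label = line.rstrip("\n").split("\t")
                yield idx, {"text": text, "label": label}
```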
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5351/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5351/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5350
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5350/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5350/comments
https://api.github.com/repos/huggingface/datasets/issues/5350/events
https://github.com/huggingface/datasets/pull/5350
1,487,559,904
PR_kwDODunzps5E8y2E
5,350
Clean up Loading methods docstrings
{ "login": "stevhliu", "id": 59462357, "node_id": "MDQ6VXNlcjU5NDYyMzU3", "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stevhliu", "html_url": "https://github.com/stevhliu", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "repos_url": "https://api.github.com/users/stevhliu/repos", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-09T22:25:30"
"2022-12-12T17:27:20"
"2022-12-12T17:24:01"
MEMBER
null
Clean up for the docstrings in Loading methods!
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5350/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5350/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5350", "html_url": "https://github.com/huggingface/datasets/pull/5350", "diff_url": "https://github.com/huggingface/datasets/pull/5350.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5350.patch", "merged_at": "2022-12-12T17:24:01" }
true
https://api.github.com/repos/huggingface/datasets/issues/5349
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5349/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5349/comments
https://api.github.com/repos/huggingface/datasets/issues/5349/events
https://github.com/huggingface/datasets/pull/5349
1,487,396,780
PR_kwDODunzps5E8N6G
5,349
Clean up remaining Main Classes docstrings
{ "login": "stevhliu", "id": 59462357, "node_id": "MDQ6VXNlcjU5NDYyMzU3", "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stevhliu", "html_url": "https://github.com/stevhliu", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "repos_url": "https://api.github.com/users/stevhliu/repos", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-09T20:17:15"
"2022-12-12T17:27:17"
"2022-12-12T17:24:13"
MEMBER
null
This PR cleans up the remaining docstrings in Main Classes (`IterableDataset`, `IterableDatasetDict`, and `Features`).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5349/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5349/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5349", "html_url": "https://github.com/huggingface/datasets/pull/5349", "diff_url": "https://github.com/huggingface/datasets/pull/5349.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5349.patch", "merged_at": "2022-12-12T17:24:13" }
true
https://api.github.com/repos/huggingface/datasets/issues/5346
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5346/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5346/comments
https://api.github.com/repos/huggingface/datasets/issues/5346/events
https://github.com/huggingface/datasets/issues/5346
1,486,884,983
I_kwDODunzps5YoBB3
5,346
[Quick poll] Give your opinion on the future of the Hugging Face Open Source ecosystem!
{ "login": "LysandreJik", "id": 30755778, "node_id": "MDQ6VXNlcjMwNzU1Nzc4", "avatar_url": "https://avatars.githubusercontent.com/u/30755778?v=4", "gravatar_id": "", "url": "https://api.github.com/users/LysandreJik", "html_url": "https://github.com/LysandreJik", "followers_url": "https://api.github.com/users/LysandreJik/followers", "following_url": "https://api.github.com/users/LysandreJik/following{/other_user}", "gists_url": "https://api.github.com/users/LysandreJik/gists{/gist_id}", "starred_url": "https://api.github.com/users/LysandreJik/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LysandreJik/subscriptions", "organizations_url": "https://api.github.com/users/LysandreJik/orgs", "repos_url": "https://api.github.com/users/LysandreJik/repos", "events_url": "https://api.github.com/users/LysandreJik/events{/privacy}", "received_events_url": "https://api.github.com/users/LysandreJik/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "As the survey is finished, can we close this issue, @LysandreJik ?", "Yes! I'll post a public summary on the forums shortly." ]
"2022-12-09T14:48:02"
"2023-01-25T19:35:41"
"2023-01-25T19:35:40"
MEMBER
null
Thanks to all of you, Datasets is just about to pass 15k stars! Since the last survey, a lot has happened: the [diffusers](https://github.com/huggingface/diffusers), [evaluate](https://github.com/huggingface/evaluate) and [skops](https://github.com/skops-dev/skops) libraries were born. `timm` joined the Hugging Face ecosystem. There were 25 new releases of `transformers`, 21 new releases of `datasets`, 13 new releases of `accelerate`. If you have a couple of minutes and want to participate in shaping the future of the ecosystem, please share your thoughts: [**hf.co/oss-survey**](https://docs.google.com/forms/d/e/1FAIpQLSf4xFQKtpjr6I_l7OfNofqiR8s-WG6tcNbkchDJJf5gYD72zQ/viewform?usp=sf_link) (please reply in the above feedback form rather than to this thread) Thank you all on behalf of the HuggingFace team! 🤗
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5346/reactions", "total_count": 3, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 3, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5346/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5344
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5344/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5344/comments
https://api.github.com/repos/huggingface/datasets/issues/5344/events
https://github.com/huggingface/datasets/pull/5344
1,485,628,319
PR_kwDODunzps5E2BPN
5,344
Clean up Dataset and DatasetDict
{ "login": "stevhliu", "id": 59462357, "node_id": "MDQ6VXNlcjU5NDYyMzU3", "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stevhliu", "html_url": "https://github.com/stevhliu", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "repos_url": "https://api.github.com/users/stevhliu/repos", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-09T00:02:08"
"2022-12-13T00:56:07"
"2022-12-13T00:53:02"
MEMBER
null
This PR cleans up the docstrings for the other half of the methods in `Dataset` and finishes `DatasetDict`.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5344/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5344/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5344", "html_url": "https://github.com/huggingface/datasets/pull/5344", "diff_url": "https://github.com/huggingface/datasets/pull/5344.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5344.patch", "merged_at": "2022-12-13T00:53:01" }
true
https://api.github.com/repos/huggingface/datasets/issues/5343
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5343/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5343/comments
https://api.github.com/repos/huggingface/datasets/issues/5343/events
https://github.com/huggingface/datasets/issues/5343
1,485,297,823
I_kwDODunzps5Yh9if
5,343
T5 for Q&A produces truncated sentence
{ "login": "junyongyou", "id": 13484072, "node_id": "MDQ6VXNlcjEzNDg0MDcy", "avatar_url": "https://avatars.githubusercontent.com/u/13484072?v=4", "gravatar_id": "", "url": "https://api.github.com/users/junyongyou", "html_url": "https://github.com/junyongyou", "followers_url": "https://api.github.com/users/junyongyou/followers", "following_url": "https://api.github.com/users/junyongyou/following{/other_user}", "gists_url": "https://api.github.com/users/junyongyou/gists{/gist_id}", "starred_url": "https://api.github.com/users/junyongyou/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/junyongyou/subscriptions", "organizations_url": "https://api.github.com/users/junyongyou/orgs", "repos_url": "https://api.github.com/users/junyongyou/repos", "events_url": "https://api.github.com/users/junyongyou/events{/privacy}", "received_events_url": "https://api.github.com/users/junyongyou/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-08T19:48:46"
"2022-12-08T19:57:17"
"2022-12-08T19:57:17"
NONE
null
Dear all, I am fine-tuning T5 for Q&A task using the MedQuAD ([GitHub - abachaa/MedQuAD: Medical Question Answering Dataset of 47,457 QA pairs created from 12 NIH websites](https://github.com/abachaa/MedQuAD)) dataset. In the dataset, there are many long answers with thousands of words. I have used pytorch_lightning to train the T5-large model. I have two questions. For example, I set both the max_length, max_input_length, max_output_length to 128. How to deal with those long answers? I just left them as is and the T5Tokenizer can automatically handle. I would assume the tokenizer just truncates an answer at the position of 128th word (or 127th). Is it possible that I manually split an answer into different parts, each part has 128 words; and then all these sub-answers serve as a separate answer to the same question? Another question is that I get incomplete (truncated) answers when using the fine-tuned model in inference, even though the predicted answer is shorter than 128 words. I found a message posted 2 years ago saying that one should add at the end of texts when fine-tuning T5. I followed that but then got a warning message that duplicated were found. I am assuming that this is because the tokenizer truncates an answer text, thus is missing in the truncated answer, such that the end token is not produced in predicted answer. However, I am not sure. Can anybody point out how to address this issue? Any suggestions are highly appreciated. Below is some code snippet. ` import pytorch_lightning as pl from torch.utils.data import DataLoader import torch import numpy as np import time from pathlib import Path from transformers import ( Adafactor, T5ForConditionalGeneration, T5Tokenizer, get_linear_schedule_with_warmup ) from torch.utils.data import RandomSampler from question_answering.utils import * class T5FineTuner(pl.LightningModule): def __init__(self, hyparams): super(T5FineTuner, self).__init__() self.hyparams = hyparams self.model = T5ForConditionalGeneration.from_pretrained(hyparams.model_name_or_path) self.tokenizer = T5Tokenizer.from_pretrained(hyparams.tokenizer_name_or_path) if self.hyparams.freeze_embeds: self.freeze_embeds() if self.hyparams.freeze_encoder: self.freeze_params(self.model.get_encoder()) # assert_all_frozen() self.step_count = 0 self.output_dir = Path(self.hyparams.output_dir) n_observations_per_split = { 'train': self.hyparams.n_train, 'validation': self.hyparams.n_val, 'test': self.hyparams.n_test } self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} self.em_score_list = [] self.subset_score_list = [] data_folder = r'C:\Datasets\MedQuAD-master' self.train_data, self.val_data, self.test_data = load_medqa_data(data_folder) def freeze_params(self, model): for param in model.parameters(): param.requires_grad = False def freeze_embeds(self): try: self.freeze_params(self.model.model.shared) for d in [self.model.model.encoder, self.model.model.decoder]: self.freeze_params(d.embed_positions) self.freeze_params(d.embed_tokens) except AttributeError: self.freeze_params(self.model.shared) for d in [self.model.encoder, self.model.decoder]: self.freeze_params(d.embed_tokens) def lmap(self, f, x): return list(map(f, x)) def is_logger(self): return self.trainer.proc_rank <= 0 def forward(self, input_ids, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, labels=None): return self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, 
labels=labels ) def _step(self, batch): labels = batch['target_ids'] labels[labels[:, :] == self.tokenizer.pad_token_id] = -100 outputs = self( input_ids = batch['source_ids'], attention_mask=batch['source_mask'], labels=labels, decoder_attention_mask=batch['target_mask'] ) loss = outputs[0] return loss def ids_to_clean_text(self, generated_ids): gen_text = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True) return self.lmap(str.strip, gen_text) def _generative_step(self, batch): t0 = time.time() generated_ids = self.model.generate( batch["source_ids"], attention_mask=batch["source_mask"], use_cache=True, decoder_attention_mask=batch['target_mask'], max_length=128, num_beams=2, early_stopping=True ) preds = self.ids_to_clean_text(generated_ids) targets = self.ids_to_clean_text(batch["target_ids"]) gen_time = (time.time() - t0) / batch["source_ids"].shape[0] loss = self._step(batch) base_metrics = {'val_loss': loss} summ_len = np.mean(self.lmap(len, generated_ids)) base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=targets) em_score, subset_match_score = calculate_scores(preds, targets) self.em_score_list.append(em_score) self.subset_score_list.append(subset_match_score) em_score = torch.tensor(em_score, dtype=torch.float32) subset_match_score = torch.tensor(subset_match_score, dtype=torch.float32) base_metrics.update(em_score=em_score, subset_match_score=subset_match_score) # rouge_results = self.rouge_metric.compute() # rouge_dict = self.parse_score(rouge_results) return base_metrics def training_step(self, batch, batch_idx): loss = self._step(batch) tensorboard_logs = {'train_loss': loss} return {'loss': loss, 'log': tensorboard_logs} def training_epoch_end(self, outputs): avg_train_loss = torch.stack([x['loss'] for x in outputs]).mean() tensorboard_logs = {'avg_train_loss': avg_train_loss} # return {'avg_train_loss': avg_train_loss, 'log': tensorboard_logs, 'progress_bar': tensorboard_logs} def validation_step(self, batch, batch_idx): return self._generative_step(batch) def validation_epoch_end(self, outputs): avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean() tensorboard_logs = {'val_loss': avg_loss} if len(self.em_score_list) <= 2: average_em_score = sum(self.em_score_list) / len(self.em_score_list) average_subset_match_score = sum(self.subset_score_list) / len(self.subset_score_list) else: latest_em_score = self.em_score_list[:-2] latest_subset_score = self.subset_score_list[:-2] average_em_score = sum(latest_em_score) / len(latest_em_score) average_subset_match_score = sum(latest_subset_score) / len(latest_subset_score) average_em_score = torch.tensor(average_em_score, dtype=torch.float32) average_subset_match_score = torch.tensor(average_subset_match_score, dtype=torch.float32) tensorboard_logs.update(em_score=average_em_score, subset_match_score=average_subset_match_score) self.target_gen = [] self.prediction_gen = [] return { 'avg_val_loss': avg_loss, 'em_score': average_em_score, 'subset_match_socre': average_subset_match_score, 'log': tensorboard_logs, 'progress_bar': tensorboard_logs } def configure_optimizers(self): model = self.model no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": self.hyparams.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = 
Adafactor(optimizer_grouped_parameters, lr=self.hyparams.learning_rate, scale_parameter=False, relative_step=False) self.opt = optimizer return [optimizer] def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure=None, on_tpu=False, using_native_amp=False, using_lbfgs=False): optimizer.step(closure=optimizer_closure) optimizer.zero_grad() self.lr_scheduler.step() def get_tqdm_dict(self): tqdm_dict = {"loss": "{:.3f}".format(self.trainer.avg_loss), "lr": self.lr_scheduler.get_last_lr()[-1]} return tqdm_dict def train_dataloader(self): n_samples = self.n_obs['train'] train_dataset = get_dataset(tokenizer=self.tokenizer, data=self.train_data, num_samples=n_samples, args=self.hyparams) sampler = RandomSampler(train_dataset) dataloader = DataLoader(train_dataset, sampler=sampler, batch_size=self.hyparams.train_batch_size, drop_last=True, num_workers=4) # t_total = ( # (len(dataloader.dataset) // (self.hyparams.train_batch_size * max(1, self.hyparams.n_gpu))) # // self.hyparams.gradient_accumulation_steps # * float(self.hyparams.num_train_epochs) # ) t_total = 100000 scheduler = get_linear_schedule_with_warmup( self.opt, num_warmup_steps=self.hyparams.warmup_steps, num_training_steps=t_total ) self.lr_scheduler = scheduler return dataloader def val_dataloader(self): n_samples = self.n_obs['validation'] validation_dataset = get_dataset(tokenizer=self.tokenizer, data=self.val_data, num_samples=n_samples, args=self.hyparams) sampler = RandomSampler(validation_dataset) return DataLoader(validation_dataset, shuffle=False, batch_size=self.hyparams.eval_batch_size, sampler=sampler, num_workers=4) def test_dataloader(self): n_samples = self.n_obs['test'] test_dataset = get_dataset(tokenizer=self.tokenizer, data=self.test_data, num_samples=n_samples, args=self.hyparams) return DataLoader(test_dataset, batch_size=self.hyparams.eval_batch_size, num_workers=4) def on_save_checkpoint(self, checkpoint): save_path = self.output_dir.joinpath("best_tfmr") self.model.config.save_step = self.step_count self.model.save_pretrained(save_path) self.tokenizer.save_pretrained(save_path) import os import argparse import pytorch_lightning as pl from question_answering.t5_closed_book import T5FineTuner if __name__ == '__main__': args_dict = dict( output_dir="", # path to save the checkpoints model_name_or_path='t5-large', tokenizer_name_or_path='t5-large', max_input_length=128, max_output_length=128, freeze_encoder=False, freeze_embeds=False, learning_rate=1e-5, weight_decay=0.0, adam_epsilon=1e-8, warmup_steps=0, train_batch_size=4, eval_batch_size=4, num_train_epochs=2, gradient_accumulation_steps=10, n_gpu=1, resume_from_checkpoint=None, val_check_interval=0.5, n_val=4000, n_train=-1, n_test=-1, early_stop_callback=False, fp_16=False, opt_level='O1', max_grad_norm=1.0, seed=101, ) args_dict.update({'output_dir': 't5_large_MedQuAD_256', 'num_train_epochs': 100, 'train_batch_size': 16, 'eval_batch_size': 16, 'learning_rate': 1e-3}) args = argparse.Namespace(**args_dict) checkpoint_callback = pl.callbacks.ModelCheckpoint(dirpath=args.output_dir, monitor="em_score", mode="max", save_top_k=1) ## If resuming from checkpoint, add an arg resume_from_checkpoint train_params = dict( accumulate_grad_batches=args.gradient_accumulation_steps, gpus=args.n_gpu, max_epochs=args.num_train_epochs, # early_stop_callback=False, precision=16 if args.fp_16 else 32, # amp_level=args.opt_level, # resume_from_checkpoint=args.resume_from_checkpoint, gradient_clip_val=args.max_grad_norm, 
checkpoint_callback=checkpoint_callback, val_check_interval=args.val_check_interval, # accelerator='dp' # logger=wandb_logger, # callbacks=[LoggingCallback()], ) model = T5FineTuner(args) trainer = pl.Trainer(**train_params) trainer.fit(model)
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5343/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5343/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5342
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5342/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5342/comments
https://api.github.com/repos/huggingface/datasets/issues/5342/events
https://github.com/huggingface/datasets/issues/5342
1,485,244,178
I_kwDODunzps5YhwcS
5,342
Emotion dataset cannot be downloaded
{ "login": "cbarond", "id": 78887193, "node_id": "MDQ6VXNlcjc4ODg3MTkz", "avatar_url": "https://avatars.githubusercontent.com/u/78887193?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cbarond", "html_url": "https://github.com/cbarond", "followers_url": "https://api.github.com/users/cbarond/followers", "following_url": "https://api.github.com/users/cbarond/following{/other_user}", "gists_url": "https://api.github.com/users/cbarond/gists{/gist_id}", "starred_url": "https://api.github.com/users/cbarond/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cbarond/subscriptions", "organizations_url": "https://api.github.com/users/cbarond/orgs", "repos_url": "https://api.github.com/users/cbarond/repos", "events_url": "https://api.github.com/users/cbarond/events{/privacy}", "received_events_url": "https://api.github.com/users/cbarond/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892865, "node_id": "MDU6TGFiZWwxOTM1ODkyODY1", "url": "https://api.github.com/repos/huggingface/datasets/labels/duplicate", "name": "duplicate", "color": "cfd3d7", "default": true, "description": "This issue or pull request already exists" } ]
closed
false
null
[]
null
[ "Hi @cbarond there's already an open issue at https://github.com/dair-ai/emotion_dataset/issues/5, as the data seems to be missing now, so check that issue instead 👍🏻 ", "Thanks @cbarond for reporting and @alvarobartt for pointing to the issue we opened in the author's repo.\r\n\r\nIndeed, this issue was first raised in the \"emotion\" dataset Community tab: https://huggingface.co/datasets/emotion/discussions/3\r\n\r\nI'm closing this issue and leave the issue above for the subsequent updates.\r\n\r\nDuplicate of: https://huggingface.co/datasets/emotion/discussions/3", "try using \"SetFit/emotion\" instead", "> try using \"SetFit/emotion\" instead\r\n\r\nI' replaced \"emotion\" with \"SetFit/Emotion\", but the code is getting stuck at\r\n\r\n`emotions = load_dataset(\"SetFit/emotion\")`\r\n\r\nI pause execution using the debugger, and it takes me to filelock.py:226\r\n\r\n`with self._thread_lock:`\r\n\r\nDo you know a way to get past this issue?", "thanks @honeyimholm - worked for me", "> try using \"SetFit/emotion\" instead\r\n\r\nIt really helps a lot, thank you!", "The dataset loading script has been fixed: https://huggingface.co/datasets/emotion/discussions/4" ]
"2022-12-08T19:07:09"
"2023-02-23T19:13:19"
"2022-12-09T10:46:11"
NONE
null
### Describe the bug The emotion dataset gives a FileNotFoundError. The full error is: `FileNotFoundError: Couldn't find file at https://www.dropbox.com/s/1pzkadrvffbqw6o/train.txt?dl=1`. It was working yesterday (December 7, 2022), but stopped working today (December 8, 2022). ### Steps to reproduce the bug ```python from datasets import load_dataset dataset = load_dataset("emotion") ``` ### Expected behavior The dataset should load properly. ### Environment info - `datasets` version: 2.7.1 - Platform: Windows-10-10.0.19045-SP0 - Python version: 3.9.13 - PyArrow version: 10.0.1 - Pandas version: 1.5.1
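A minimal sketch of the workaround suggested in the comments above, assuming the "SetFit/emotion" mirror remains available; it simply swaps the dataset identifier:

```python
from datasets import load_dataset

# Workaround from the discussion: load the community mirror of the emotion
# dataset instead of the original script whose hosted files went missing.
dataset = load_dataset("SetFit/emotion")
print(dataset)
```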
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5342/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5342/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5341
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5341/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5341/comments
https://api.github.com/repos/huggingface/datasets/issues/5341/events
https://github.com/huggingface/datasets/pull/5341
1,484,376,644
PR_kwDODunzps5Exohx
5,341
Remove tasks.json
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-08T11:04:35"
"2022-12-09T12:26:21"
"2022-12-09T12:23:20"
MEMBER
null
After discussions in https://github.com/huggingface/datasets/pull/5335 we should remove this file that is not used anymore. We should update https://github.com/huggingface/hub-docs/blob/main/js/src/lib/interfaces/Types.ts instead.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5341/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5341/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5341", "html_url": "https://github.com/huggingface/datasets/pull/5341", "diff_url": "https://github.com/huggingface/datasets/pull/5341.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5341.patch", "merged_at": "2022-12-09T12:23:20" }
true
https://api.github.com/repos/huggingface/datasets/issues/5340
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5340/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5340/comments
https://api.github.com/repos/huggingface/datasets/issues/5340/events
https://github.com/huggingface/datasets/pull/5340
1,483,182,158
PR_kwDODunzps5EtWo3
5,340
Clean up DatasetInfo and Dataset docstrings
{ "login": "stevhliu", "id": 59462357, "node_id": "MDQ6VXNlcjU5NDYyMzU3", "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stevhliu", "html_url": "https://github.com/stevhliu", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "repos_url": "https://api.github.com/users/stevhliu/repos", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-08T00:17:53"
"2022-12-08T19:33:14"
"2022-12-08T19:30:10"
MEMBER
null
This PR cleans up the docstrings for `DatasetInfo` and about half of the methods in `Dataset`.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5340/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5340/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5340", "html_url": "https://github.com/huggingface/datasets/pull/5340", "diff_url": "https://github.com/huggingface/datasets/pull/5340.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5340.patch", "merged_at": "2022-12-08T19:30:10" }
true
https://api.github.com/repos/huggingface/datasets/issues/5338
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5338/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5338/comments
https://api.github.com/repos/huggingface/datasets/issues/5338/events
https://github.com/huggingface/datasets/issues/5338
1,482,646,151
I_kwDODunzps5YX2KH
5,338
`map()` stops every 1000 steps
{ "login": "bayartsogt-ya", "id": 43239645, "node_id": "MDQ6VXNlcjQzMjM5NjQ1", "avatar_url": "https://avatars.githubusercontent.com/u/43239645?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bayartsogt-ya", "html_url": "https://github.com/bayartsogt-ya", "followers_url": "https://api.github.com/users/bayartsogt-ya/followers", "following_url": "https://api.github.com/users/bayartsogt-ya/following{/other_user}", "gists_url": "https://api.github.com/users/bayartsogt-ya/gists{/gist_id}", "starred_url": "https://api.github.com/users/bayartsogt-ya/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bayartsogt-ya/subscriptions", "organizations_url": "https://api.github.com/users/bayartsogt-ya/orgs", "repos_url": "https://api.github.com/users/bayartsogt-ya/repos", "events_url": "https://api.github.com/users/bayartsogt-ya/events{/privacy}", "received_events_url": "https://api.github.com/users/bayartsogt-ya/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi !\r\n\r\n> It starts using all the cores (I am not sure why because I did not pass num_proc)\r\n\r\nThe tokenizer uses Rust code that is multithreaded. And maybe the `feature_extractor` might run some things in parallel as well - but I'm not super familiar with its internals.\r\n\r\n> then progress bar stops at every 1k steps. (starts using a single core)\r\n\r\nEvery 1000 examples we flush the processed examples to disk. It is this way because Arrow is a columnar format: you must write data chunk by chunk. The processing in on hold while writing right now - maybe this can be improved in the future.", "Hi @lhoestq \r\nThanks for the explanation! it was so helpful! Let me check why `feature_extractor` is running on multiple cpus." ]
"2022-12-07T19:09:40"
"2022-12-10T00:39:29"
"2022-12-10T00:39:28"
NONE
null
### Describe the bug I am passing the following `prepare_dataset` function to `Dataset.map` (code is inspired from [here](https://github.com/huggingface/community-events/blob/main/whisper-fine-tuning-event/run_speech_recognition_seq2seq_streaming.py#L454)) ```python3 def prepare_dataset(batch): # load and resample audio data from 48 to 16kHz audio = batch["audio"] # compute log-Mel input features from input audio array batch["input_features"] = feature_extractor(audio["array"], sampling_rate=audio["sampling_rate"]).input_features[0] # encode target text to label ids batch["labels"] = tokenizer(batch[text_column]).input_ids return batch ... train_ds = train_ds.map(prepare_dataset) ``` Here is the exact code I am running https://github.com/bayartsogt-ya/whisper-multiple-hf-datasets/blob/main/train.py#L70-L71 It starts using all the cores (I am not sure why because I did not pass `num_proc`) then progress bar stops at every 1k steps. (starts using a single core) then come back to using all the cores again. link to [screen record](https://youtu.be/jPQpQQGp6Gc) Can someone explain this process and maybe provide a way to improve this pipeline? cc: @lhoestq ### Steps to reproduce the bug 1. load the dataset 2. create a Whisper processor 3. create a `prepare_dataset` function 4. pass the function to `dataset.map(prepare_dataset)` ### Expected behavior - Use a single core per a function - not to stop at some point? ### Environment info - `datasets` version: 2.7.1.dev0 - Platform: Linux-5.4.0-109-generic-x86_64-with-glibc2.27 - Python version: 3.8.10 - PyArrow version: 10.0.1 - Pandas version: 1.5.2
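A minimal sketch (not from the thread) of the `writer_batch_size` knob behind the pauses described above; the toy data and column names are placeholders:

```python
from datasets import Dataset

ds = Dataset.from_dict({"text": ["some text"] * 5000})

def add_n_chars(example):
    example["n_chars"] = len(example["text"])
    return example

# map() buffers processed examples and flushes them to the Arrow cache file
# every `writer_batch_size` examples (default 1000), which is when the progress
# bar appears to stall; raising it trades memory for fewer flushes.
ds = ds.map(add_n_chars, writer_batch_size=5000)
```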
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5338/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5338/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5336
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5336/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5336/comments
https://api.github.com/repos/huggingface/datasets/issues/5336/events
https://github.com/huggingface/datasets/pull/5336
1,479,649,900
PR_kwDODunzps5Egzed
5,336
Set `IterableDataset.map` param `batch_size` typing as optional
{ "login": "alvarobartt", "id": 36760800, "node_id": "MDQ6VXNlcjM2NzYwODAw", "avatar_url": "https://avatars.githubusercontent.com/u/36760800?v=4", "gravatar_id": "", "url": "https://api.github.com/users/alvarobartt", "html_url": "https://github.com/alvarobartt", "followers_url": "https://api.github.com/users/alvarobartt/followers", "following_url": "https://api.github.com/users/alvarobartt/following{/other_user}", "gists_url": "https://api.github.com/users/alvarobartt/gists{/gist_id}", "starred_url": "https://api.github.com/users/alvarobartt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alvarobartt/subscriptions", "organizations_url": "https://api.github.com/users/alvarobartt/orgs", "repos_url": "https://api.github.com/users/alvarobartt/repos", "events_url": "https://api.github.com/users/alvarobartt/events{/privacy}", "received_events_url": "https://api.github.com/users/alvarobartt/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-06T17:08:10"
"2022-12-07T14:14:56"
"2022-12-07T14:06:27"
CONTRIBUTOR
null
This PR solves #5325 ~Indeed we're using the typing for optional values as `Union[type, None]` as it's similar to how Python 3.10 handles optional values as `type | None`, instead of using `Optional[type]`.~ ~Do we want to start using `Union[type, None]` for type-hinting optional values or just keep on using `Optional`?~ -> Keeping `Optional` still for consistency with the rest of the code in `datasets` Also we now allow `batch_size` to be `None` for `IterableDataset.map` and `IterableDataset.filter`e.g. `MappedExamplesIterable` as `map` is internally instantiating those and propagating the `batch_size` param so if it can be `None` for `map` it should also do so for `MappedExamplesIterable`, as well as for `FilteredExamplesIterable` when calling `IterableDataset.filter`. ## TODOs - [x] Add integration tests - [x] Handle scenario where `batched=True` and `batch_size=None` or `batch_size<=0`
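A short sketch of the case this PR covers, `batched=True` with `batch_size=None` on a streaming (iterable) dataset; the dataset name and column are placeholders:

```python
from datasets import load_dataset

# Streaming load_dataset returns an IterableDataset.
ids = load_dataset("rotten_tomatoes", split="train", streaming=True)

def add_len(batch):
    batch["n_chars"] = [len(t) for t in batch["text"]]
    return batch

# With this change, batch_size is typed as Optional, so passing None is allowed.
ids = ids.map(add_len, batched=True, batch_size=None)
print(next(iter(ids)))
```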
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5336/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5336/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5336", "html_url": "https://github.com/huggingface/datasets/pull/5336", "diff_url": "https://github.com/huggingface/datasets/pull/5336.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5336.patch", "merged_at": "2022-12-07T14:06:27" }
true
https://api.github.com/repos/huggingface/datasets/issues/5335
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5335/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5335/comments
https://api.github.com/repos/huggingface/datasets/issues/5335/events
https://github.com/huggingface/datasets/pull/5335
1,478,890,788
PR_kwDODunzps5EeHdA
5,335
Update tasks.json
{ "login": "sayakpaul", "id": 22957388, "node_id": "MDQ6VXNlcjIyOTU3Mzg4", "avatar_url": "https://avatars.githubusercontent.com/u/22957388?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sayakpaul", "html_url": "https://github.com/sayakpaul", "followers_url": "https://api.github.com/users/sayakpaul/followers", "following_url": "https://api.github.com/users/sayakpaul/following{/other_user}", "gists_url": "https://api.github.com/users/sayakpaul/gists{/gist_id}", "starred_url": "https://api.github.com/users/sayakpaul/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sayakpaul/subscriptions", "organizations_url": "https://api.github.com/users/sayakpaul/orgs", "repos_url": "https://api.github.com/users/sayakpaul/repos", "events_url": "https://api.github.com/users/sayakpaul/events{/privacy}", "received_events_url": "https://api.github.com/users/sayakpaul/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-06T11:37:57"
"2022-12-08T11:05:33"
"2022-12-07T12:46:03"
MEMBER
null
Context: * https://github.com/huggingface/datasets/issues/5255#issuecomment-1339107195 Cc: @osanseviero
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5335/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5335/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5335", "html_url": "https://github.com/huggingface/datasets/pull/5335", "diff_url": "https://github.com/huggingface/datasets/pull/5335.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5335.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/5334
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5334/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5334/comments
https://api.github.com/repos/huggingface/datasets/issues/5334/events
https://github.com/huggingface/datasets/pull/5334
1,477,421,927
PR_kwDODunzps5EY9zN
5,334
Clean up docstrings
{ "login": "stevhliu", "id": 59462357, "node_id": "MDQ6VXNlcjU5NDYyMzU3", "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stevhliu", "html_url": "https://github.com/stevhliu", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "repos_url": "https://api.github.com/users/stevhliu/repos", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892861, "node_id": "MDU6TGFiZWwxOTM1ODkyODYx", "url": "https://api.github.com/repos/huggingface/datasets/labels/documentation", "name": "documentation", "color": "0075ca", "default": true, "description": "Improvements or additions to documentation" } ]
closed
false
null
[]
null
[]
"2022-12-05T20:56:08"
"2022-12-09T01:44:25"
"2022-12-09T01:41:44"
MEMBER
null
As raised by @polinaeterna in #5324, some of the docstrings are a bit of a mess because it has both Markdown and Sphinx syntax. This PR fixes the docstring for `DatasetBuilder`. I'll start working on cleaning up the rest of the docstrings and removing the old Sphinx syntax (let me know if you prefer one big PR with all the cleaned changes or multiple smaller ones)! 🧼
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5334/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5334/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5334", "html_url": "https://github.com/huggingface/datasets/pull/5334", "diff_url": "https://github.com/huggingface/datasets/pull/5334.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5334.patch", "merged_at": "2022-12-09T01:41:44" }
true
https://api.github.com/repos/huggingface/datasets/issues/5333
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5333/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5333/comments
https://api.github.com/repos/huggingface/datasets/issues/5333/events
https://github.com/huggingface/datasets/pull/5333
1,476,890,156
PR_kwDODunzps5EXGQ2
5,333
fix: 🐛 pass the token to get the list of config names
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-05T16:06:09"
"2022-12-06T08:25:17"
"2022-12-06T08:22:49"
CONTRIBUTOR
null
Otherwise, get_dataset_infos doesn't work on gated or private datasets, even with the correct token.
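A small sketch of the call this fixes; the repository id and token are placeholders:

```python
from datasets import get_dataset_infos

# Before this fix, the token was not forwarded when resolving the list of
# config names, so this failed on gated/private datasets even with a valid token.
infos = get_dataset_infos("org/private-dataset", use_auth_token="hf_xxx")
print(list(infos))
```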
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5333/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5333/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5333", "html_url": "https://github.com/huggingface/datasets/pull/5333", "diff_url": "https://github.com/huggingface/datasets/pull/5333.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5333.patch", "merged_at": "2022-12-06T08:22:49" }
true
https://api.github.com/repos/huggingface/datasets/issues/5332
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5332/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5332/comments
https://api.github.com/repos/huggingface/datasets/issues/5332/events
https://github.com/huggingface/datasets/issues/5332
1,476,513,072
I_kwDODunzps5YAc0w
5,332
Passing numpy array to ClassLabel names causes ValueError
{ "login": "freddyheppell", "id": 1475568, "node_id": "MDQ6VXNlcjE0NzU1Njg=", "avatar_url": "https://avatars.githubusercontent.com/u/1475568?v=4", "gravatar_id": "", "url": "https://api.github.com/users/freddyheppell", "html_url": "https://github.com/freddyheppell", "followers_url": "https://api.github.com/users/freddyheppell/followers", "following_url": "https://api.github.com/users/freddyheppell/following{/other_user}", "gists_url": "https://api.github.com/users/freddyheppell/gists{/gist_id}", "starred_url": "https://api.github.com/users/freddyheppell/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/freddyheppell/subscriptions", "organizations_url": "https://api.github.com/users/freddyheppell/orgs", "repos_url": "https://api.github.com/users/freddyheppell/repos", "events_url": "https://api.github.com/users/freddyheppell/events{/privacy}", "received_events_url": "https://api.github.com/users/freddyheppell/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Should `datasets` allow `ClassLabel` input parameter to be an `np.array` even though internally we need to cast it to a Python list? @lhoestq @mariosasko ", "Hi! No, I don't think so. The `names` parameter is [annotated](https://github.com/huggingface/datasets/blob/582236640b9109988e5f7a16a8353696ffa09a16/src/datasets/features/features.py#L892) as `List[str]` (**NumPy arrays are not lists**), and considering that type checking is not a common practice in Python, I think we can leave the code as-is.", "I appreciate it is the wrong type, and that type checking is not common, but I think there's a few circumstances that make it a good idea from a usability perspective.\r\n\r\nIt's quite a difficult error to debug because it comes from a utility function (so it's not immediately obvious which parameter caused it). What makes it even more difficult is the exception happens when the features instance is used to instantiate the dataset, **not** when when the wrong type is actually passed when the features is instantiated. When I was debugging the error, I didn't really consider it could be an issue with the features instance because it had instantiated fine. It's also not one of the more common exceptions caused by trying to use a non-list as a list.\r\n\r\nIt's also relatively easy to accidentally get a numpy array of class types (e.g. calling `unique()` on a pandas dataframe column). Additionally, passing in a `set` instead of the list (again, relatively easy because people may run `set(classes)` to generate uniques) causes an error when the features instance is used, albeit a slightly more obvious one.\r\n\r\nThe names list is already being processed and validated in the `__post_init__` method anyway, so it would not really be adding any complexity to check it is actually a list here too. I'm happy to contribute this change if you change your mind about whether it's worthwhile.", "I agree that it's not easy to debug this issue, so perhaps we could add some basic type checking (e.g. `not isinstance(names, list)` -> error) to make debugging easier. Feel free to submit a PR.\r\n\r\n> Additionally, passing in a set instead of the list (again, relatively easy because people may run set(classes) to generate uniques) causes an error when the features instance is used, albeit a slightly more obvious one.\r\n\r\n`set` is an unordered structure (it's ordered in Python 3.6+, but this is CPython's implementation detail), and the order of ClassLabel `names` matters, so this doesn't require a fix.", "What about checking for `Sequence` instead? I think users can pass a list or a tuple as well." ]
"2022-12-05T12:59:03"
"2022-12-22T16:32:50"
"2022-12-22T16:32:50"
CONTRIBUTOR
null
### Describe the bug If a numpy array is passed to the names argument of ClassLabel, creating a dataset with those features causes an error. ### Steps to reproduce the bug https://colab.research.google.com/drive/1cV_es1PWZiEuus17n-2C-w0KEoEZ68IX TLDR: If I define my classes as: ``` my_classes = np.array(['one', 'two', 'three']) ``` Then this errors: ```py features = Features({'value': Value('string'), 'label': ClassLabel(names=my_classes)}) dataset = Dataset.from_list(my_data, features=features) ``` ``` ValueError Traceback (most recent call last) [<ipython-input-8-a8a9d53ec82f>](https://localhost:8080/#) in <module> ----> 1 dataset = Dataset.from_list(my_data, features=features) 11 frames [/usr/local/lib/python3.8/dist-packages/datasets/utils/py_utils.py](https://localhost:8080/#) in _asdict_inner(obj) 183 for f in fields(obj): 184 value = _asdict_inner(getattr(obj, f.name)) --> 185 if not f.init or value != f.default or f.metadata.get("include_in_asdict_even_if_is_default", False): 186 result[f.name] = value 187 return result ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all() ``` But this works: ``` features2 = Features({'value': Value('string'), 'label': ClassLabel(names=list(my_classes))}) dataset2 = Dataset.from_list(my_data, features=features2) ``` ### Expected behavior If I provide a numpy array of class names, I would expect either an error that the names list is the wrong type, or for it to be cast internally. ### Environment info - `datasets` version: 2.7.1 - Platform: Linux-5.15.0-56-generic-x86_64-with-glibc2.10 - Python version: 3.8.15 - PyArrow version: 10.0.1 - Pandas version: 1.5.2 Additionally: - Numpy version: 1.23.5
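A hypothetical sketch of the guard discussed in the comments (not the actual `datasets` source); it only illustrates failing fast on non-list `names`:

```python
def validate_class_label_names(names):
    # Reject NumPy arrays, sets, etc. early so the error points at ClassLabel
    # construction instead of surfacing later inside dataclass serialization.
    if not isinstance(names, (list, tuple)):
        raise TypeError(f"ClassLabel 'names' must be a list of strings, got {type(names)}")
    return list(names)
```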
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5332/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5332/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5329
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5329/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5329/comments
https://api.github.com/repos/huggingface/datasets/issues/5329/events
https://github.com/huggingface/datasets/pull/5329
1,471,999,125
PR_kwDODunzps5EGK3y
5,329
Clarify imagefolder is for small datasets
{ "login": "stevhliu", "id": 59462357, "node_id": "MDQ6VXNlcjU5NDYyMzU3", "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stevhliu", "html_url": "https://github.com/stevhliu", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "repos_url": "https://api.github.com/users/stevhliu/repos", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-01T21:47:29"
"2022-12-06T17:20:04"
"2022-12-06T17:16:53"
MEMBER
null
Based on feedback from [here](https://github.com/huggingface/datasets/issues/5317#issuecomment-1334108824), this PR adds a note to the `imagefolder` loading and creating docs that `imagefolder` is designed for small scale image datasets.
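For reference, a minimal `imagefolder` call of the kind the updated docs describe (the directory path is a placeholder):

```python
from datasets import load_dataset

# "imagefolder" scans a local directory (optionally with one sub-folder per
# class used as the label) and is intended for small-scale image datasets.
ds = load_dataset("imagefolder", data_dir="path/to/images", split="train")
```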
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5329/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5329/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5329", "html_url": "https://github.com/huggingface/datasets/pull/5329", "diff_url": "https://github.com/huggingface/datasets/pull/5329.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5329.patch", "merged_at": "2022-12-06T17:16:53" }
true
https://api.github.com/repos/huggingface/datasets/issues/5328
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5328/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5328/comments
https://api.github.com/repos/huggingface/datasets/issues/5328/events
https://github.com/huggingface/datasets/pull/5328
1,471,661,437
PR_kwDODunzps5EFAyT
5,328
Fix docs building for main
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-01T17:07:45"
"2022-12-02T16:29:00"
"2022-12-02T16:26:00"
MEMBER
null
This PR reverts the triggering event for building documentation introduced by: - #5250 Fix #5326.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5328/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5328/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5328", "html_url": "https://github.com/huggingface/datasets/pull/5328", "diff_url": "https://github.com/huggingface/datasets/pull/5328.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5328.patch", "merged_at": "2022-12-02T16:26:00" }
true
https://api.github.com/repos/huggingface/datasets/issues/5326
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5326/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5326/comments
https://api.github.com/repos/huggingface/datasets/issues/5326/events
https://github.com/huggingface/datasets/issues/5326
1,471,634,168
I_kwDODunzps5Xt1r4
5,326
No documentation for main branch is built
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[]
"2022-12-01T16:50:58"
"2022-12-02T16:26:01"
"2022-12-02T16:26:01"
MEMBER
null
Since: - #5250 - Commit: 703b84311f4ead83c7f79639f2dfa739295f0be6 the docs for main branch are no longer built. The change introduced only triggers the docs building for releases.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5326/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5326/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5325
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5325/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5325/comments
https://api.github.com/repos/huggingface/datasets/issues/5325/events
https://github.com/huggingface/datasets/issues/5325
1,471,536,822
I_kwDODunzps5Xtd62
5,325
map(...batch_size=None) for IterableDataset
{ "login": "frankier", "id": 299380, "node_id": "MDQ6VXNlcjI5OTM4MA==", "avatar_url": "https://avatars.githubusercontent.com/u/299380?v=4", "gravatar_id": "", "url": "https://api.github.com/users/frankier", "html_url": "https://github.com/frankier", "followers_url": "https://api.github.com/users/frankier/followers", "following_url": "https://api.github.com/users/frankier/following{/other_user}", "gists_url": "https://api.github.com/users/frankier/gists{/gist_id}", "starred_url": "https://api.github.com/users/frankier/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/frankier/subscriptions", "organizations_url": "https://api.github.com/users/frankier/orgs", "repos_url": "https://api.github.com/users/frankier/repos", "events_url": "https://api.github.com/users/frankier/events{/privacy}", "received_events_url": "https://api.github.com/users/frankier/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" }, { "id": 1935892877, "node_id": "MDU6TGFiZWwxOTM1ODkyODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue", "name": "good first issue", "color": "7057ff", "default": true, "description": "Good for newcomers" } ]
closed
false
{ "login": "alvarobartt", "id": 36760800, "node_id": "MDQ6VXNlcjM2NzYwODAw", "avatar_url": "https://avatars.githubusercontent.com/u/36760800?v=4", "gravatar_id": "", "url": "https://api.github.com/users/alvarobartt", "html_url": "https://github.com/alvarobartt", "followers_url": "https://api.github.com/users/alvarobartt/followers", "following_url": "https://api.github.com/users/alvarobartt/following{/other_user}", "gists_url": "https://api.github.com/users/alvarobartt/gists{/gist_id}", "starred_url": "https://api.github.com/users/alvarobartt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alvarobartt/subscriptions", "organizations_url": "https://api.github.com/users/alvarobartt/orgs", "repos_url": "https://api.github.com/users/alvarobartt/repos", "events_url": "https://api.github.com/users/alvarobartt/events{/privacy}", "received_events_url": "https://api.github.com/users/alvarobartt/received_events", "type": "User", "site_admin": false }
[ { "login": "alvarobartt", "id": 36760800, "node_id": "MDQ6VXNlcjM2NzYwODAw", "avatar_url": "https://avatars.githubusercontent.com/u/36760800?v=4", "gravatar_id": "", "url": "https://api.github.com/users/alvarobartt", "html_url": "https://github.com/alvarobartt", "followers_url": "https://api.github.com/users/alvarobartt/followers", "following_url": "https://api.github.com/users/alvarobartt/following{/other_user}", "gists_url": "https://api.github.com/users/alvarobartt/gists{/gist_id}", "starred_url": "https://api.github.com/users/alvarobartt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alvarobartt/subscriptions", "organizations_url": "https://api.github.com/users/alvarobartt/orgs", "repos_url": "https://api.github.com/users/alvarobartt/repos", "events_url": "https://api.github.com/users/alvarobartt/events{/privacy}", "received_events_url": "https://api.github.com/users/alvarobartt/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi! I agree it makes sense for `IterableDataset.map` to support the `batch_size=None` case. This should be super easy to fix.", "@mariosasko as this is something simple maybe I can include it as part of https://github.com/huggingface/datasets/pull/5311? Let me know :+1:", "#self-assign", "Feel free to close this @lhoestq as part of https://github.com/huggingface/datasets/pull/5336 :hugs:", "Thanks again :)\r\n\r\n> For practical usages, an alternative to this would be to convert from an iterable dataset to a map-style dataset, but it is not obvious how to do this.\r\n\r\nThis is interesting as well, if anyone wants to explore" ]
"2022-12-01T15:43:42"
"2022-12-07T15:54:43"
"2022-12-07T15:54:42"
CONTRIBUTOR
null
### Feature request Dataset.map(...) allows batch_size to be None. It would be nice if IterableDataset did too. ### Motivation Although it may seem a bit of a spurious request given that `IterableDataset` is meant for larger than memory datasets, but there are a couple of reasons why this might be nice. One is that load_dataset(...) can return either IterableDataset or Dataset. mypy will then complain if batch_size=None even if we know it is Dataset. Of course we can do: assert isinstance(d, datasets.DatasetDict) But it is a mild inconvenience. What's more annoying is that whenever we use something like e.g. `combine_datasets(...)`, we end up with the union again, and so have to do the assert again. Another is that we could actually end up with an IterableDataset small enough for memory in normal/correct usage, e.g. by filtering a massive IterableDataset. For practical usages, an alternative to this would be to convert from an iterable dataset to a map-style dataset, but it is not obvious how to do this. ### Your contribution Not this time.
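On the "convert to a map-style dataset" alternative mentioned above, a minimal sketch under the assumption that the iterable dataset is small enough to materialise (the helper name is made up):

```python
from datasets import Dataset

def materialize(iterable_ds):
    # Re-yield every example from the IterableDataset and build a regular,
    # map-style Dataset from the stream.
    def gen():
        yield from iterable_ds
    return Dataset.from_generator(gen)
```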
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5325/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5325/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5323
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5323/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5323/comments
https://api.github.com/repos/huggingface/datasets/issues/5323/events
https://github.com/huggingface/datasets/issues/5323
1,471,518,803
I_kwDODunzps5XtZhT
5,323
Duplicated Keys in Taskmaster-2 Dataset
{ "login": "liaeh", "id": 52380283, "node_id": "MDQ6VXNlcjUyMzgwMjgz", "avatar_url": "https://avatars.githubusercontent.com/u/52380283?v=4", "gravatar_id": "", "url": "https://api.github.com/users/liaeh", "html_url": "https://github.com/liaeh", "followers_url": "https://api.github.com/users/liaeh/followers", "following_url": "https://api.github.com/users/liaeh/following{/other_user}", "gists_url": "https://api.github.com/users/liaeh/gists{/gist_id}", "starred_url": "https://api.github.com/users/liaeh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/liaeh/subscriptions", "organizations_url": "https://api.github.com/users/liaeh/orgs", "repos_url": "https://api.github.com/users/liaeh/repos", "events_url": "https://api.github.com/users/liaeh/events{/privacy}", "received_events_url": "https://api.github.com/users/liaeh/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for reporting, @liaeh.\r\n\r\nWe are having a look at it. ", "I have transferred the discussion to the Community tab of the dataset: https://huggingface.co/datasets/taskmaster2/discussions/1" ]
"2022-12-01T15:31:06"
"2022-12-01T16:26:06"
"2022-12-01T16:26:06"
NONE
null
### Describe the bug Loading certain splits () of the taskmaster-2 dataset fails because of a DuplicatedKeysError. This occurs for the following domains: `'hotels', 'movies', 'music', 'sports'`. The domains `'flights', 'food-ordering', 'restaurant-search'` load fine. Output: ### Steps to reproduce the bug ``` from datasets import load_dataset dataset = load_dataset("taskmaster2", "music") ``` Output: ``` --------------------------------------------------------------------------- DuplicatedKeysError Traceback (most recent call last) File ~/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/builder.py:1532, in GeneratorBasedBuilder._prepare_split_single(self, arg) [1531](file:///home/user/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/builder.py?line=1530) example = self.info.features.encode_example(record) if self.info.features is not None else record -> [1532](file:///home/user/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/builder.py?line=1531) writer.write(example, key) [1533](file:///home/user/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/builder.py?line=1532) num_examples_progress_update += 1 File ~/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/arrow_writer.py:475, in ArrowWriter.write(self, example, key, writer_batch_size) [474](file:///home/user/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/arrow_writer.py?line=473) if self._check_duplicates: --> [475](file:///home/user/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/arrow_writer.py?line=474) self.check_duplicate_keys() [476](file:///home/user/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/arrow_writer.py?line=475) # Re-intializing to empty list for next batch File ~/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/arrow_writer.py:492, in ArrowWriter.check_duplicate_keys(self) [486](file:///home/user/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/arrow_writer.py?line=485) duplicate_key_indices = [ [487](file:///home/user/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/arrow_writer.py?line=486) str(self._num_examples + index) [488](file:///home/user/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/arrow_writer.py?line=487) for index, (duplicate_hash, _) in enumerate(self.hkey_record) [489](file:///home/user/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/arrow_writer.py?line=488) if duplicate_hash == hash [490](file:///home/user/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/arrow_writer.py?line=489) ] --> [492](file:///home/user/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/arrow_writer.py?line=491) raise DuplicatedKeysError(key, duplicate_key_indices) [493](file:///home/user/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/arrow_writer.py?line=492) else: DuplicatedKeysError: Found multiple examples generated with the same key The examples at index 858, 859 have the key dlg-89174425-d57a-4db7-a92b-165c3bff6735 During handling of the above exception, another exception occurred: DuplicatedKeysError Traceback (most recent call last) File ~/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/builder.py:1541, in GeneratorBasedBuilder._prepare_split_single(self, arg) 
1540 num_shards = shard_id + 1
-> 1541 num_examples, num_bytes = writer.finalize()
1542 writer.close()

File ~/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/arrow_writer.py:563, in ArrowWriter.finalize(self, close_stream)
562 if self._check_duplicates:
--> 563 self.check_duplicate_keys()
564 # Re-intializing to empty list for next batch

File ~/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/arrow_writer.py:492, in ArrowWriter.check_duplicate_keys(self)
486 duplicate_key_indices = [
487     str(self._num_examples + index)
488     for index, (duplicate_hash, _) in enumerate(self.hkey_record)
489     if duplicate_hash == hash
490 ]
--> 492 raise DuplicatedKeysError(key, duplicate_key_indices)
493 else:

DuplicatedKeysError: Found multiple examples generated with the same key
The examples at index 858, 859 have the key dlg-89174425-d57a-4db7-a92b-165c3bff6735

The above exception was the direct cause of the following exception:

DatasetGenerationError Traceback (most recent call last)
Cell In[23], line 1
----> 1 dataset = load_dataset("taskmaster2", "music")

File ~/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/load.py:1741, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, num_proc, **config_kwargs)
1738 try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES
1740 # Download and prepare data
-> 1741 builder_instance.download_and_prepare(
1742     download_config=download_config,
1743     download_mode=download_mode,
1744     ignore_verifications=ignore_verifications,
1745     try_from_hf_gcs=try_from_hf_gcs,
1746     use_auth_token=use_auth_token,
1747     num_proc=num_proc,
1748 )
1750 # Build dataset for splits
1751 keep_in_memory = (
1752     keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
1753 )

File ~/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/builder.py:822, in DatasetBuilder.download_and_prepare(self, output_dir, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)
820 if num_proc is not None:
821     prepare_split_kwargs["num_proc"] = num_proc
--> 822 self._download_and_prepare(
823     dl_manager=dl_manager,
824     verify_infos=verify_infos,
825     **prepare_split_kwargs,
826     **download_and_prepare_kwargs,
827 )
828 # Sync info
829 self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())

File ~/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/builder.py:1555, in GeneratorBasedBuilder._download_and_prepare(self, dl_manager, verify_infos, **prepare_splits_kwargs)
1554 def _download_and_prepare(self, dl_manager, verify_infos, **prepare_splits_kwargs):
-> 1555 super()._download_and_prepare(
1556     dl_manager, verify_infos, check_duplicate_keys=verify_infos, **prepare_splits_kwargs
1557 )

File ~/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/builder.py:913, in DatasetBuilder._download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs)
909 split_dict.add(split_generator.split_info)
911 try:
912     # Prepare split will record examples associated to the split
--> 913 self._prepare_split(split_generator, **prepare_split_kwargs)
914 except OSError as e:
915     raise OSError(
916         "Cannot find data file. "
917         + (self.manual_download_instructions or "")
918         + "\nOriginal error:\n"
919         + str(e)
920     ) from None

File ~/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/builder.py:1396, in GeneratorBasedBuilder._prepare_split(self, split_generator, check_duplicate_keys, file_format, num_proc, max_shard_size)
1394 gen_kwargs = split_generator.gen_kwargs
1395 job_id = 0
-> 1396 for job_id, done, content in self._prepare_split_single(
1397     {"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args}
1398 ):
1399     if done:
1400         result = content

File ~/repos/tts-dataset/tts-dataset/venv/lib/python3.9/site-packages/datasets/builder.py:1550, in GeneratorBasedBuilder._prepare_split_single(self, arg)
1548 if isinstance(e, SchemaInferenceError) and e.__context__ is not None:
1549     e = e.__context__
-> 1550 raise DatasetGenerationError("An error occurred while generating the dataset") from e
1552 yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)

DatasetGenerationError: An error occurred while generating the dataset
``` ### Expected behavior Loads the dataset ### Environment info - `datasets` version: 2.7.1 - Platform: Linux-5.13.0-40-generic-x86_64-with-glibc2.31 - Python version: 3.9.7 - PyArrow version: 10.0.1 - Pandas version: 1.5.2
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5323/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5323/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5322
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5322/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5322/comments
https://api.github.com/repos/huggingface/datasets/issues/5322/events
https://github.com/huggingface/datasets/pull/5322
1,471,502,162
PR_kwDODunzps5EEeQP
5,322
Raise error for `.tar` archives in the same way as for `.tar.gz` and `.tgz` in `_get_extraction_protocol`
{ "login": "polinaeterna", "id": 16348744, "node_id": "MDQ6VXNlcjE2MzQ4NzQ0", "avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4", "gravatar_id": "", "url": "https://api.github.com/users/polinaeterna", "html_url": "https://github.com/polinaeterna", "followers_url": "https://api.github.com/users/polinaeterna/followers", "following_url": "https://api.github.com/users/polinaeterna/following{/other_user}", "gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}", "starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions", "organizations_url": "https://api.github.com/users/polinaeterna/orgs", "repos_url": "https://api.github.com/users/polinaeterna/repos", "events_url": "https://api.github.com/users/polinaeterna/events{/privacy}", "received_events_url": "https://api.github.com/users/polinaeterna/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-01T15:19:28"
"2022-12-14T16:37:16"
"2022-12-14T16:33:30"
CONTRIBUTOR
null
Currently `download_and_extract` doesn't throw an error when it is used with `.tar` files in streaming mode, because `_get_extraction_protocol` doesn't raise one for them (as it does for `.tar.gz` and `.tgz`). Instead, `_get_extraction_protocol` returns a formatted url as if the `tar` protocol were supported, but it isn't. That means that in dataset scripts, `.tar` files would be loaded and then fail during example generation (after `download_and_extract` has run). So this PR raises an error for `.tar` files too.
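A rough sketch of the kind of check this PR describes (illustrative only; the function body and error message here are assumptions, not the actual `datasets` source):

```python
def _get_extraction_protocol_sketch(urlpath: str):
    # Raise for plain TAR archives the same way as for .tar.gz / .tgz, instead of
    # returning a formatted url for an unsupported "tar://" protocol.
    path = urlpath.split("::")[0]
    if path.endswith((".tar.gz", ".tgz", ".tar")):
        raise NotImplementedError(
            f"Extraction protocol for TAR archives like '{urlpath}' is not implemented "
            "in streaming mode. Please use `dl_manager.iter_archive` instead."
        )
    # ... handle the other supported compression protocols here ...
```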
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5322/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5322/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5322", "html_url": "https://github.com/huggingface/datasets/pull/5322", "diff_url": "https://github.com/huggingface/datasets/pull/5322.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5322.patch", "merged_at": "2022-12-14T16:33:30" }
true
https://api.github.com/repos/huggingface/datasets/issues/5321
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5321/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5321/comments
https://api.github.com/repos/huggingface/datasets/issues/5321/events
https://github.com/huggingface/datasets/pull/5321
1,471,430,667
PR_kwDODunzps5EEOhE
5,321
Fix loading from HF GCP cache
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-01T14:39:06"
"2022-12-01T16:10:09"
"2022-12-01T16:07:02"
MEMBER
null
As reported in https://discuss.huggingface.co/t/error-loading-wikipedia-dataset/26599/4, it's not possible to download a cached version of Wikipedia from the HF GCP cache. I fixed it and added an integration test (it runs in about 10 seconds).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5321/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5321/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5321", "html_url": "https://github.com/huggingface/datasets/pull/5321", "diff_url": "https://github.com/huggingface/datasets/pull/5321.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5321.patch", "merged_at": "2022-12-01T16:07:02" }
true
https://api.github.com/repos/huggingface/datasets/issues/5320
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5320/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5320/comments
https://api.github.com/repos/huggingface/datasets/issues/5320/events
https://github.com/huggingface/datasets/pull/5320
1,471,360,910
PR_kwDODunzps5ED_UQ
5,320
[Extract] Place the lock file next to the destination directory
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-01T13:55:49"
"2022-12-01T15:36:44"
"2022-12-01T15:33:58"
MEMBER
null
Previously the lock file was placed next to the archive to extract, but the archive can be in a read-only directory, as noticed in https://github.com/huggingface/datasets/issues/5295. Therefore I moved the lock file next to the destination directory, which is required to have write permissions.
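A small sketch of the idea, assuming a `filelock`-style lock around extraction (the helper name and the use of `shutil.unpack_archive` are illustrative, not the `datasets` implementation):

```python
import shutil
from filelock import FileLock

def extract_with_lock(archive_path: str, output_dir: str) -> None:
    # The lock file sits next to the destination directory, which must be writable,
    # rather than next to the archive, which may live in a read-only location.
    lock_path = output_dir + ".lock"
    with FileLock(lock_path):
        shutil.unpack_archive(archive_path, output_dir)
```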
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5320/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5320/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5320", "html_url": "https://github.com/huggingface/datasets/pull/5320", "diff_url": "https://github.com/huggingface/datasets/pull/5320.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5320.patch", "merged_at": "2022-12-01T15:33:58" }
true
https://api.github.com/repos/huggingface/datasets/issues/5319
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5319/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5319/comments
https://api.github.com/repos/huggingface/datasets/issues/5319/events
https://github.com/huggingface/datasets/pull/5319
1,470,945,515
PR_kwDODunzps5ECkfc
5,319
Fix Text sample_by paragraph
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-01T09:08:09"
"2022-12-01T15:21:44"
"2022-12-01T15:19:00"
MEMBER
null
Fix #5316.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5319/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5319/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5319", "html_url": "https://github.com/huggingface/datasets/pull/5319", "diff_url": "https://github.com/huggingface/datasets/pull/5319.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5319.patch", "merged_at": "2022-12-01T15:19:00" }
true
https://api.github.com/repos/huggingface/datasets/issues/5318
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5318/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5318/comments
https://api.github.com/repos/huggingface/datasets/issues/5318/events
https://github.com/huggingface/datasets/pull/5318
1,470,749,750
PR_kwDODunzps5EB6RM
5,318
Origin/fix missing features error
{ "login": "eunseojo", "id": 12104720, "node_id": "MDQ6VXNlcjEyMTA0NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/12104720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/eunseojo", "html_url": "https://github.com/eunseojo", "followers_url": "https://api.github.com/users/eunseojo/followers", "following_url": "https://api.github.com/users/eunseojo/following{/other_user}", "gists_url": "https://api.github.com/users/eunseojo/gists{/gist_id}", "starred_url": "https://api.github.com/users/eunseojo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/eunseojo/subscriptions", "organizations_url": "https://api.github.com/users/eunseojo/orgs", "repos_url": "https://api.github.com/users/eunseojo/repos", "events_url": "https://api.github.com/users/eunseojo/events{/privacy}", "received_events_url": "https://api.github.com/users/eunseojo/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-12-01T06:18:39"
"2022-12-12T19:06:42"
"2022-12-04T05:49:39"
CONTRIBUTOR
null
This fixes the problem where `load_dataset` reads data with "features" provided but some read batches are missing columns that only show up later. For instance, the provided "features" require columns A, B, C but a given batch only contains columns B and C. This is fixed by adding the missing column A filled with nulls.
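A minimal sketch of the idea described above: if a batch is missing a column that the requested features declare, add it as an all-null column so every batch shares the same schema. The helper name and types are illustrative, not the actual `datasets` internals:

```python
import pyarrow as pa

def add_missing_columns(table: pa.Table, expected_columns: dict) -> pa.Table:
    # Append an all-null column for every expected column the batch does not have.
    for name, dtype in expected_columns.items():
        if name not in table.column_names:
            table = table.append_column(name, pa.array([None] * len(table), type=dtype))
    return table

batch = pa.table({"B": [1, 2], "C": ["x", "y"]})
fixed = add_missing_columns(batch, {"A": pa.string(), "B": pa.int64(), "C": pa.string()})
print(fixed.schema)  # B: int64, C: string, A: string (A is all nulls)
```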
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5318/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5318/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5318", "html_url": "https://github.com/huggingface/datasets/pull/5318", "diff_url": "https://github.com/huggingface/datasets/pull/5318.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5318.patch", "merged_at": "2022-12-04T05:49:39" }
true
https://api.github.com/repos/huggingface/datasets/issues/5316
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5316/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5316/comments
https://api.github.com/repos/huggingface/datasets/issues/5316/events
https://github.com/huggingface/datasets/issues/5316
1,470,115,681
I_kwDODunzps5XoC9h
5,316
Bug in sample_by="paragraph"
{ "login": "adampauls", "id": 1243668, "node_id": "MDQ6VXNlcjEyNDM2Njg=", "avatar_url": "https://avatars.githubusercontent.com/u/1243668?v=4", "gravatar_id": "", "url": "https://api.github.com/users/adampauls", "html_url": "https://github.com/adampauls", "followers_url": "https://api.github.com/users/adampauls/followers", "following_url": "https://api.github.com/users/adampauls/following{/other_user}", "gists_url": "https://api.github.com/users/adampauls/gists{/gist_id}", "starred_url": "https://api.github.com/users/adampauls/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/adampauls/subscriptions", "organizations_url": "https://api.github.com/users/adampauls/orgs", "repos_url": "https://api.github.com/users/adampauls/repos", "events_url": "https://api.github.com/users/adampauls/events{/privacy}", "received_events_url": "https://api.github.com/users/adampauls/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for reporting, @adampauls.\r\n\r\nWe are having a look at it. " ]
"2022-11-30T19:24:13"
"2022-12-01T15:19:02"
"2022-12-01T15:19:02"
NONE
null
### Describe the bug I think [this line](https://github.com/huggingface/datasets/blob/main/src/datasets/packaged_modules/text/text.py#L96) is wrong and should be `batch = f.read(self.config.chunksize)`. Otherwise it will never terminate because even when `f` is finished reading, `batch` will still be truthy from the last iteration. ### Steps to reproduce the bug ``` > cat test.txt a b c d e f ``` ```python >>> import datasets >>> datasets.load_dataset("text", data_files={"train":"test.txt"}, sample_by="paragraph") ``` This will go on forever. ### Expected behavior Terminates very quickly. ### Environment info `version = "2.6.1"` but I think the bug is still there on main.
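A minimal, self-contained illustration of the failure mode described above (simplified, not the actual `text.py` code; the chunk size, helper names, and the iteration guard are made up for the demo):

```python
import io

def count_chunks_buggy(f, chunksize=8):
    n = 0
    batch = f.read(chunksize)
    while batch and n < 10_000:        # guard added only so this demo stops
        batch += f.read(chunksize)     # BUG: appends "" at EOF, so `batch` stays truthy
        n += 1
    return n                           # hits the guard instead of terminating naturally

def count_chunks_fixed(f, chunksize=8):
    n = 0
    batch = f.read(chunksize)
    while batch:
        n += 1
        batch = f.read(chunksize)      # reassign, so "" at EOF ends the loop
    return n

print(count_chunks_buggy(io.StringIO("a\nb\nc\nd\ne\nf\n")))  # 10000 (guard)
print(count_chunks_fixed(io.StringIO("a\nb\nc\nd\ne\nf\n")))  # 2
```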
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5316/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5316/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5313
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5313/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5313/comments
https://api.github.com/repos/huggingface/datasets/issues/5313/events
https://github.com/huggingface/datasets/pull/5313
1,468,484,136
PR_kwDODunzps5D6Qfb
5,313
Fix description of streaming in the docs
{ "login": "polinaeterna", "id": 16348744, "node_id": "MDQ6VXNlcjE2MzQ4NzQ0", "avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4", "gravatar_id": "", "url": "https://api.github.com/users/polinaeterna", "html_url": "https://github.com/polinaeterna", "followers_url": "https://api.github.com/users/polinaeterna/followers", "following_url": "https://api.github.com/users/polinaeterna/following{/other_user}", "gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}", "starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions", "organizations_url": "https://api.github.com/users/polinaeterna/orgs", "repos_url": "https://api.github.com/users/polinaeterna/repos", "events_url": "https://api.github.com/users/polinaeterna/events{/privacy}", "received_events_url": "https://api.github.com/users/polinaeterna/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-11-29T18:00:28"
"2022-12-01T14:55:30"
"2022-12-01T14:00:34"
CONTRIBUTOR
null
We say that "the data is being downloaded progressively", which is not true: it is just streamed, so I fixed it. I probably missed some other places where it is written? Also changed the docstrings for `StreamingDownloadManager`'s `download` and `extract` to reflect the same, as these docstrings are displayed in the documentation. cc @lhoestq
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5313/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5313/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5313", "html_url": "https://github.com/huggingface/datasets/pull/5313", "diff_url": "https://github.com/huggingface/datasets/pull/5313.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5313.patch", "merged_at": "2022-12-01T14:00:34" }
true
https://api.github.com/repos/huggingface/datasets/issues/5312
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5312/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5312/comments
https://api.github.com/repos/huggingface/datasets/issues/5312/events
https://github.com/huggingface/datasets/pull/5312
1,468,352,562
PR_kwDODunzps5D5zxI
5,312
Add DatasetDict.to_pandas
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-11-29T16:30:02"
"2023-01-25T17:33:43"
"2023-01-25T17:33:42"
MEMBER
null
From discussions in https://github.com/huggingface/datasets/issues/5189, for tabular data it doesn't really make sense to have to do ```python df = load_dataset(...)["train"].to_pandas() ``` because many datasets are not split. In this PR I added `to_pandas` to `DatasetDict`, which returns a DataFrame. If there's only one split, you don't need to specify the split name: ```python df = load_dataset(...).to_pandas() ``` EDIT: and if a dataset has multiple splits: ```python df = load_dataset(...).to_pandas(splits=["train", "test"]) # or df = load_dataset(...).to_pandas(splits="all") # raises an error because you need to select the split(s) to convert load_dataset(...).to_pandas() ``` I do have one question though @merveenoyan @adrinjalali @mariosasko: Should we raise an error if there are multiple splits and ask the user to choose one explicitly?
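A rough sketch of what such a `DatasetDict.to_pandas` could look like, based only on the behavior described in this PR (the function name and error message are assumptions, not the merged implementation):

```python
import pandas as pd

def dataset_dict_to_pandas(dset_dict, splits=None):
    # With a single split, no argument is needed; with several, the caller must
    # either list the splits or pass "all".
    if splits is None:
        if len(dset_dict) > 1:
            raise ValueError(
                f"Several splits found: {list(dset_dict)}. Pass `splits=[...]` or `splits='all'`."
            )
        splits = list(dset_dict)
    elif splits == "all":
        splits = list(dset_dict)
    return pd.concat([dset_dict[split].to_pandas() for split in splits], ignore_index=True)
```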
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5312/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5312/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5312", "html_url": "https://github.com/huggingface/datasets/pull/5312", "diff_url": "https://github.com/huggingface/datasets/pull/5312.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5312.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/5311
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5311/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5311/comments
https://api.github.com/repos/huggingface/datasets/issues/5311/events
https://github.com/huggingface/datasets/pull/5311
1,467,875,153
PR_kwDODunzps5D4Mm3
5,311
Add `features` param to `IterableDataset.map`
{ "login": "alvarobartt", "id": 36760800, "node_id": "MDQ6VXNlcjM2NzYwODAw", "avatar_url": "https://avatars.githubusercontent.com/u/36760800?v=4", "gravatar_id": "", "url": "https://api.github.com/users/alvarobartt", "html_url": "https://github.com/alvarobartt", "followers_url": "https://api.github.com/users/alvarobartt/followers", "following_url": "https://api.github.com/users/alvarobartt/following{/other_user}", "gists_url": "https://api.github.com/users/alvarobartt/gists{/gist_id}", "starred_url": "https://api.github.com/users/alvarobartt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alvarobartt/subscriptions", "organizations_url": "https://api.github.com/users/alvarobartt/orgs", "repos_url": "https://api.github.com/users/alvarobartt/repos", "events_url": "https://api.github.com/users/alvarobartt/events{/privacy}", "received_events_url": "https://api.github.com/users/alvarobartt/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
"2022-11-29T11:08:34"
"2022-12-06T15:45:02"
"2022-12-06T15:42:04"
CONTRIBUTOR
null
## Description As suggested by @lhoestq in #3888, we should add the `features` param to `IterableDataset.map` so that the features can be preserved (instead of being set to `None`, which is the default behavior) whenever the user passes them. This makes it consistent with `Dataset.map`, which provides a `features` param so that features are specified by the user rather than inferred, and are later validated by `ArrowWriter`. This is already handled internally by the functions relying on `IterableDataset.map`, such as `rename_column`, `rename_columns`, and `remove_columns`, as described in #5287. ## Usage Example ```python from datasets import load_dataset, Features ds = load_dataset("rotten_tomatoes", split="validation", streaming=True) print(ds.info.features) ds = ds.map( lambda x: {"target": x["label"]}, features=Features( {"target": ds.info.features["label"], "label": ds.info.features["label"], "text": ds.info.features["text"]} ), ) print(ds.info.features) ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/5311/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/5311/timeline
null
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/5311", "html_url": "https://github.com/huggingface/datasets/pull/5311", "diff_url": "https://github.com/huggingface/datasets/pull/5311.diff", "patch_url": "https://github.com/huggingface/datasets/pull/5311.patch", "merged_at": "2022-12-06T15:42:04" }
true