url (stringlengths 58-61) | repository_url (stringclasses 1 value) | labels_url (stringlengths 72-75) | comments_url (stringlengths 67-70) | events_url (stringlengths 65-68) | html_url (stringlengths 46-51) | id (int64, 599M-1.62B) | node_id (stringlengths 18-32) | number (int64, 1-5.62k) | title (stringlengths 1-290) | user (dict) | labels (list) | state (stringclasses 1 value) | locked (bool, 1 class) | assignee (dict) | assignees (list) | milestone (dict) | comments (sequence) | created_at (unknown) | updated_at (unknown) | closed_at (unknown) | author_association (stringclasses 3 values) | active_lock_reason (null) | body (stringlengths 0-228k) | reactions (dict) | timeline_url (stringlengths 67-70) | performed_via_github_app (null) | state_reason (stringclasses 2 values) | draft (bool, 2 classes) | pull_request (dict) | is_pull_request (bool, 2 classes) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
https://api.github.com/repos/huggingface/datasets/issues/4573 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4573/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4573/comments | https://api.github.com/repos/huggingface/datasets/issues/4573/events | https://github.com/huggingface/datasets/pull/4573 | 1,285,023,629 | PR_kwDODunzps46YEEa | 4,573 | Fix evaluation metadata for ncbi_disease | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 4564477500,
"node_id": "LA_kwDODunzps8AAAABEBBmPA",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20contribution",
"name": "dataset contribution",
"color": "0e8a16",
"default": false,
"description": "Contribution to a dataset script"
}
] | closed | false | null | [] | null | [] | "2022-06-26T20:29:32" | "2022-09-23T09:40:00" | "2022-09-23T09:38:02" | MEMBER | null | This PR fixes the task in the evaluation metadata and removes the metrics info as we've decided this is not a great way to propagate this information downstream. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4573/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4573/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4573",
"html_url": "https://github.com/huggingface/datasets/pull/4573",
"diff_url": "https://github.com/huggingface/datasets/pull/4573.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4573.patch",
"merged_at": null
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4572 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4572/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4572/comments | https://api.github.com/repos/huggingface/datasets/issues/4572/events | https://github.com/huggingface/datasets/issues/4572 | 1,285,022,499 | I_kwDODunzps5Ml-Mj | 4,572 | Dataset Viewer issue for mlsum | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 3470211881,
"node_id": "LA_kwDODunzps7O1zsp",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer",
"name": "dataset-viewer",
"color": "E5583E",
"default": false,
"description": "Related to the dataset viewer on huggingface.co"
}
] | closed | false | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"Thanks for reporting, @lewtun.\r\n\r\nAfter investigation, it seems that the server https://gitlab.lip6.fr does not allow HTTP Range requests.\r\n\r\nWe are trying to find a workaround..."
] | "2022-06-26T20:24:17" | "2022-07-21T12:40:01" | "2022-07-21T12:40:01" | MEMBER | null | ### Link
https://huggingface.co/datasets/mlsum/viewer/de/train
### Description
There's seems to be a problem with the download / streaming of this dataset:
```
Server error
Status code: 400
Exception: BadZipFile
Message: File is not a zip file
```
### Owner
No | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4572/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4572/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4570 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4570/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4570/comments | https://api.github.com/repos/huggingface/datasets/issues/4570/events | https://github.com/huggingface/datasets/issues/4570 | 1,284,846,168 | I_kwDODunzps5MlTJY | 4,570 | Dataset sharding non-contiguous? | {
"login": "cakiki",
"id": 3664563,
"node_id": "MDQ6VXNlcjM2NjQ1NjM=",
"avatar_url": "https://avatars.githubusercontent.com/u/3664563?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/cakiki",
"html_url": "https://github.com/cakiki",
"followers_url": "https://api.github.com/users/cakiki/followers",
"following_url": "https://api.github.com/users/cakiki/following{/other_user}",
"gists_url": "https://api.github.com/users/cakiki/gists{/gist_id}",
"starred_url": "https://api.github.com/users/cakiki/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/cakiki/subscriptions",
"organizations_url": "https://api.github.com/users/cakiki/orgs",
"repos_url": "https://api.github.com/users/cakiki/repos",
"events_url": "https://api.github.com/users/cakiki/events{/privacy}",
"received_events_url": "https://api.github.com/users/cakiki/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | [
"This was silly; I was sure I'd looked for a `contiguous` argument, and was certain there wasn't one the first time I looked :smile:\r\n\r\nSorry about that.",
"Hi! You can pass `contiguous=True` to `.shard()` get contiguous shards. More info on this and the default behavior can be found in the [docs](https://huggingface.co/docs/datasets/v2.3.2/en/package_reference/main_classes#datasets.Dataset.shard).\r\n\r\nEDIT: Answered as you closed the thread π ",
"Hahaha I'm sorry; my excuse is: it's Sunday. (Which makes me all the more grateful for your response :smiley: ",
"@mariosasko Sorry for reviving this, but I was curious as to why `contiguous=False` was the default. This might be a personal bias, but I feel that a user would expect the opposite to be the default. :thinking: ",
"This project started as a fork of TFDS, and `contiguous=False` is the default behavior [there](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#shard)."
] | "2022-06-26T08:34:05" | "2022-06-30T11:00:47" | "2022-06-26T14:36:20" | CONTRIBUTOR | null | ## Describe the bug
I'm not sure if this is a bug; more likely normal behavior but i wanted to double check.
Is it normal that `datasets.shard` does not produce chunks that, when concatenated produce the original ordering of the sharded dataset?
This might be related to this pull request (https://github.com/huggingface/datasets/pull/4466) but I have to admit I did not properly look into the changes made.
## Steps to reproduce the bug
```python
max_shard_size = convert_file_size_to_int('300MB')
dataset_nbytes = dataset.data.nbytes
num_shards = int(dataset_nbytes / max_shard_size) + 1
num_shards = max(num_shards, 1)
print(f"{num_shards=}")
for shard_index in range(num_shards):
shard = dataset.shard(num_shards=num_shards, index=shard_index)
shard.to_parquet(f"tokenized/tokenized-{shard_index:03d}.parquet")
os.listdir('tokenized/')
```
## Expected results
I expected the shards to match the order of the data of the original dataset; i.e. `dataset[10]` being the same as `shard_1[10]` for example
## Actual results
Only the first element is the same; i.e. `dataset[0]` is the same as `shard_1[0]`
## Environment info
<!-- You can run the command `datasets-cli env` and copy-and-paste its output below. -->
- `datasets` version: 2.3.2
- Platform: Linux-4.15.0-176-generic-x86_64-with-glibc2.31
- Python version: 3.10.4
- PyArrow version: 8.0.0
- Pandas version: 1.4.2
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4570/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4570/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4569 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4569/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4569/comments | https://api.github.com/repos/huggingface/datasets/issues/4569/events | https://github.com/huggingface/datasets/issues/4569 | 1,284,833,694 | I_kwDODunzps5MlQGe | 4,569 | Dataset Viewer issue for sst2 | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 3470211881,
"node_id": "LA_kwDODunzps7O1zsp",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer",
"name": "dataset-viewer",
"color": "E5583E",
"default": false,
"description": "Related to the dataset viewer on huggingface.co"
}
] | closed | false | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"Hi @lewtun, thanks for reporting.\r\n\r\nI have checked locally and refreshed the preview and it seems working smooth now:\r\n```python\r\nIn [8]: ds\r\nOut[8]: \r\nDatasetDict({\r\n train: Dataset({\r\n features: ['idx', 'sentence', 'label'],\r\n num_rows: 67349\r\n })\r\n validation: Dataset({\r\n features: ['idx', 'sentence', 'label'],\r\n num_rows: 872\r\n })\r\n test: Dataset({\r\n features: ['idx', 'sentence', 'label'],\r\n num_rows: 1821\r\n })\r\n})\r\n```\r\n\r\nCould you confirm? ",
"Thanks @albertvillanova - it is indeed working now (not sure what caused the error in the first place). Closing this :)"
] | "2022-06-26T07:32:54" | "2022-06-27T06:37:48" | "2022-06-27T06:37:48" | MEMBER | null | ### Link
https://huggingface.co/datasets/sst2
### Description
Not sure what is causing this, however it seems that `load_dataset("sst2")` also hangs (even though it downloads the files without problem):
```
Status code: 400
Exception: Exception
Message: Give up after 5 attempts with ConnectionError
```
### Owner
No | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4569/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4569/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4568 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4568/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4568/comments | https://api.github.com/repos/huggingface/datasets/issues/4568/events | https://github.com/huggingface/datasets/issues/4568 | 1,284,655,624 | I_kwDODunzps5MkkoI | 4,568 | XNLI cache reload is very slow | {
"login": "Muennighoff",
"id": 62820084,
"node_id": "MDQ6VXNlcjYyODIwMDg0",
"avatar_url": "https://avatars.githubusercontent.com/u/62820084?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Muennighoff",
"html_url": "https://github.com/Muennighoff",
"followers_url": "https://api.github.com/users/Muennighoff/followers",
"following_url": "https://api.github.com/users/Muennighoff/following{/other_user}",
"gists_url": "https://api.github.com/users/Muennighoff/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Muennighoff/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Muennighoff/subscriptions",
"organizations_url": "https://api.github.com/users/Muennighoff/orgs",
"repos_url": "https://api.github.com/users/Muennighoff/repos",
"events_url": "https://api.github.com/users/Muennighoff/events{/privacy}",
"received_events_url": "https://api.github.com/users/Muennighoff/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | [
"Hi,\r\nCould you tell us how you are running this code?\r\nI tested on my machine (M1 Mac). And it is running fine both on and off internet.\r\n\r\n<img width=\"1033\" alt=\"Screen Shot 2022-07-03 at 1 32 25 AM\" src=\"https://user-images.githubusercontent.com/8711912/177026364-4ad7cedb-e524-4513-97f7-7961bbb34c90.png\">\r\nTested on both stable and dev version. ",
"Sure, I was running it on a Linux machine.\r\nI found that if I turn the Internet off, it would still try to make a HTTPS call which would slow down the cache loading. If you can't reproduce then we can close the issue.",
"Hi @Muennighoff! You can set the env variable `HF_DATASETS_OFFLINE` to `1` to avoid this behavior in offline mode. More info is available [here](https://huggingface.co/docs/datasets/master/en/loading#offline)."
] | "2022-06-25T16:43:56" | "2022-07-04T14:29:40" | "2022-07-04T14:29:40" | CONTRIBUTOR | null | ### Reproduce
Using `2.3.3.dev0`
`from datasets import load_dataset`
`load_dataset("xnli", "en")`
Turn off Internet
`load_dataset("xnli", "en")`
I cancelled the second `load_dataset` eventually cuz it took super long. It would be great to have something to specify e.g. `only_load_from_cache` and avoid the library trying to download when there is no Internet. If I leave it running it works but takes way longer than when there is Internet. I would expect loading from cache to take the same amount of time regardless of whether there is Internet.
```
---------------------------------------------------------------------------
gaierror Traceback (most recent call last)
/opt/conda/lib/python3.7/site-packages/urllib3/connection.py in _new_conn(self)
174 conn = connection.create_connection(
--> 175 (self._dns_host, self.port), self.timeout, **extra_kw
176 )
/opt/conda/lib/python3.7/site-packages/urllib3/util/connection.py in create_connection(address, timeout, source_address, socket_options)
71
---> 72 for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
73 af, socktype, proto, canonname, sa = res
/opt/conda/lib/python3.7/socket.py in getaddrinfo(host, port, family, type, proto, flags)
751 addrlist = []
--> 752 for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
753 af, socktype, proto, canonname, sa = res
gaierror: [Errno -3] Temporary failure in name resolution
During handling of the above exception, another exception occurred:
KeyboardInterrupt Traceback (most recent call last)
/tmp/ipykernel_33/3594208039.py in <module>
----> 1 load_dataset("xnli", "en")
/opt/conda/lib/python3.7/site-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, **config_kwargs)
1673 revision=revision,
1674 use_auth_token=use_auth_token,
-> 1675 **config_kwargs,
1676 )
1677
/opt/conda/lib/python3.7/site-packages/datasets/load.py in load_dataset_builder(path, name, data_dir, data_files, cache_dir, features, download_config, download_mode, revision, use_auth_token, **config_kwargs)
1494 download_mode=download_mode,
1495 data_dir=data_dir,
-> 1496 data_files=data_files,
1497 )
1498
/opt/conda/lib/python3.7/site-packages/datasets/load.py in dataset_module_factory(path, revision, download_config, download_mode, force_local_path, dynamic_modules_path, data_dir, data_files, **download_kwargs)
1182 download_config=download_config,
1183 download_mode=download_mode,
-> 1184 dynamic_modules_path=dynamic_modules_path,
1185 ).get_module()
1186 elif path.count("/") == 1: # community dataset on the Hub
/opt/conda/lib/python3.7/site-packages/datasets/load.py in __init__(self, name, revision, download_config, download_mode, dynamic_modules_path)
506 self.dynamic_modules_path = dynamic_modules_path
507 assert self.name.count("/") == 0
--> 508 increase_load_count(name, resource_type="dataset")
509
510 def download_loading_script(self, revision: Optional[str]) -> str:
/opt/conda/lib/python3.7/site-packages/datasets/load.py in increase_load_count(name, resource_type)
166 if not config.HF_DATASETS_OFFLINE and config.HF_UPDATE_DOWNLOAD_COUNTS:
167 try:
--> 168 head_hf_s3(name, filename=name + ".py", dataset=(resource_type == "dataset"))
169 except Exception:
170 pass
/opt/conda/lib/python3.7/site-packages/datasets/utils/file_utils.py in head_hf_s3(identifier, filename, use_cdn, dataset, max_retries)
93 return http_head(
94 hf_bucket_url(identifier=identifier, filename=filename, use_cdn=use_cdn, dataset=dataset),
---> 95 max_retries=max_retries,
96 )
97
/opt/conda/lib/python3.7/site-packages/datasets/utils/file_utils.py in http_head(url, proxies, headers, cookies, allow_redirects, timeout, max_retries)
445 allow_redirects=allow_redirects,
446 timeout=timeout,
--> 447 max_retries=max_retries,
448 )
449 return response
/opt/conda/lib/python3.7/site-packages/datasets/utils/file_utils.py in _request_with_retry(method, url, max_retries, base_wait_time, max_wait_time, timeout, **params)
366 tries += 1
367 try:
--> 368 response = requests.request(method=method.upper(), url=url, timeout=timeout, **params)
369 success = True
370 except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError) as err:
/opt/conda/lib/python3.7/site-packages/requests/api.py in request(method, url, **kwargs)
59 # cases, and look like a memory leak in others.
60 with sessions.Session() as session:
---> 61 return session.request(method=method, url=url, **kwargs)
62
63
/opt/conda/lib/python3.7/site-packages/requests/sessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
527 }
528 send_kwargs.update(settings)
--> 529 resp = self.send(prep, **send_kwargs)
530
531 return resp
/opt/conda/lib/python3.7/site-packages/requests/sessions.py in send(self, request, **kwargs)
643
644 # Send the request
--> 645 r = adapter.send(request, **kwargs)
646
647 # Total elapsed time of the request (approximately)
/opt/conda/lib/python3.7/site-packages/requests/adapters.py in send(self, request, stream, timeout, verify, cert, proxies)
448 decode_content=False,
449 retries=self.max_retries,
--> 450 timeout=timeout
451 )
452
/opt/conda/lib/python3.7/site-packages/urllib3/connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
708 body=body,
709 headers=headers,
--> 710 chunked=chunked,
711 )
712
/opt/conda/lib/python3.7/site-packages/urllib3/connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
384 # Trigger any extra validation we need to do.
385 try:
--> 386 self._validate_conn(conn)
387 except (SocketTimeout, BaseSSLError) as e:
388 # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
/opt/conda/lib/python3.7/site-packages/urllib3/connectionpool.py in _validate_conn(self, conn)
1038 # Force connect early to allow us to validate the connection.
1039 if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
-> 1040 conn.connect()
1041
1042 if not conn.is_verified:
/opt/conda/lib/python3.7/site-packages/urllib3/connection.py in connect(self)
356 def connect(self):
357 # Add certificate verification
--> 358 self.sock = conn = self._new_conn()
359 hostname = self.host
360 tls_in_tls = False
/opt/conda/lib/python3.7/site-packages/urllib3/connection.py in _new_conn(self)
173 try:
174 conn = connection.create_connection(
--> 175 (self._dns_host, self.port), self.timeout, **extra_kw
176 )
177
KeyboardInterrupt:
``` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4568/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4568/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4567 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4567/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4567/comments | https://api.github.com/repos/huggingface/datasets/issues/4567/events | https://github.com/huggingface/datasets/pull/4567 | 1,284,528,474 | PR_kwDODunzps46Wh0- | 4,567 | Add evaluation data for amazon_reviews_multi | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 4564477500,
"node_id": "LA_kwDODunzps8AAAABEBBmPA",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20contribution",
"name": "dataset contribution",
"color": "0e8a16",
"default": false,
"description": "Contribution to a dataset script"
}
] | closed | false | null | [] | null | [] | "2022-06-25T09:40:52" | "2022-09-23T09:39:39" | "2022-09-23T09:37:23" | MEMBER | null | null | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4567/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4567/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4567",
"html_url": "https://github.com/huggingface/datasets/pull/4567",
"diff_url": "https://github.com/huggingface/datasets/pull/4567.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4567.patch",
"merged_at": null
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4566 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4566/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4566/comments | https://api.github.com/repos/huggingface/datasets/issues/4566/events | https://github.com/huggingface/datasets/issues/4566 | 1,284,397,594 | I_kwDODunzps5Mjloa | 4,566 | Document link #load_dataset_enhancing_performance points to nowhere | {
"login": "subercui",
"id": 11674033,
"node_id": "MDQ6VXNlcjExNjc0MDMz",
"avatar_url": "https://avatars.githubusercontent.com/u/11674033?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/subercui",
"html_url": "https://github.com/subercui",
"followers_url": "https://api.github.com/users/subercui/followers",
"following_url": "https://api.github.com/users/subercui/following{/other_user}",
"gists_url": "https://api.github.com/users/subercui/gists{/gist_id}",
"starred_url": "https://api.github.com/users/subercui/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/subercui/subscriptions",
"organizations_url": "https://api.github.com/users/subercui/orgs",
"repos_url": "https://api.github.com/users/subercui/repos",
"events_url": "https://api.github.com/users/subercui/events{/privacy}",
"received_events_url": "https://api.github.com/users/subercui/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | [
"Hi! This is indeed the link the docstring should point to. Are you interested in submitting a PR to fix this?",
"https://github.com/huggingface/datasets/blame/master/docs/source/cache.mdx#L93\r\n\r\nThere seems already an anchor here. Somehow it doesn't work. I am not very familiar with how this online documentation works."
] | "2022-06-25T01:18:19" | "2023-01-24T16:33:40" | "2023-01-24T16:33:40" | NONE | null | ## Describe the bug
A clear and concise description of what the bug is.
![image](https://user-images.githubusercontent.com/11674033/175752806-5b066b92-9d28-4771-9112-5c8606f07741.png)
The [load_dataset_enhancing_performance](https://huggingface.co/docs/datasets/v2.3.2/en/package_reference/main_classes#load_dataset_enhancing_performance) link [here](https://huggingface.co/docs/datasets/v2.3.2/en/package_reference/main_classes#datasets.Dataset.load_from_disk.keep_in_memory) points to nowhere, I guess it should point to https://huggingface.co/docs/datasets/v2.3.2/en/cache#improve-performance?
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4566/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4566/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4565 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4565/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4565/comments | https://api.github.com/repos/huggingface/datasets/issues/4565/events | https://github.com/huggingface/datasets/issues/4565 | 1,284,141,666 | I_kwDODunzps5MinJi | 4,565 | Add UFSC OCPap dataset | {
"login": "johnnv1",
"id": 20444345,
"node_id": "MDQ6VXNlcjIwNDQ0MzQ1",
"avatar_url": "https://avatars.githubusercontent.com/u/20444345?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/johnnv1",
"html_url": "https://github.com/johnnv1",
"followers_url": "https://api.github.com/users/johnnv1/followers",
"following_url": "https://api.github.com/users/johnnv1/following{/other_user}",
"gists_url": "https://api.github.com/users/johnnv1/gists{/gist_id}",
"starred_url": "https://api.github.com/users/johnnv1/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/johnnv1/subscriptions",
"organizations_url": "https://api.github.com/users/johnnv1/orgs",
"repos_url": "https://api.github.com/users/johnnv1/repos",
"events_url": "https://api.github.com/users/johnnv1/events{/privacy}",
"received_events_url": "https://api.github.com/users/johnnv1/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 2067376369,
"node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request",
"name": "dataset request",
"color": "e99695",
"default": false,
"description": "Requesting to add a new dataset"
}
] | closed | false | null | [] | null | [
"I will add this directly on the hub (same as #4486)βin https://huggingface.co/lapix"
] | "2022-06-24T20:07:54" | "2022-07-06T19:03:02" | "2022-07-06T19:03:02" | NONE | null | ## Adding a Dataset
- **Name:** UFSC OCPap: Papanicolaou Stained Oral Cytology Dataset (v4)
- **Description:** The UFSC OCPap dataset comprises 9,797 labeled images of 1200x1600 pixels acquired from 5 slides of cancer diagnosed and 3 healthy of oral brush samples, from distinct patients.
- **Paper:** https://dx.doi.org/10.2139/ssrn.4119212
- **Data:** https://data.mendeley.com/datasets/dr7ydy9xbk/1
- **Motivation:** real data of pap stained oral cytology samples
Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4565/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4565/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4564 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4564/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4564/comments | https://api.github.com/repos/huggingface/datasets/issues/4564/events | https://github.com/huggingface/datasets/pull/4564 | 1,283,932,333 | PR_kwDODunzps46UqUN | 4,564 | Support streaming bookcorpus dataset | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-24T16:13:39" | "2022-07-06T09:34:48" | "2022-07-06T09:23:04" | MEMBER | null | Support streaming bookcorpus dataset. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4564/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4564/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4564",
"html_url": "https://github.com/huggingface/datasets/pull/4564",
"diff_url": "https://github.com/huggingface/datasets/pull/4564.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4564.patch",
"merged_at": "2022-07-06T09:23:04"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4563 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4563/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4563/comments | https://api.github.com/repos/huggingface/datasets/issues/4563/events | https://github.com/huggingface/datasets/pull/4563 | 1,283,914,383 | PR_kwDODunzps46UmZQ | 4,563 | Support streaming allocine dataset | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-24T15:55:03" | "2022-06-24T16:54:57" | "2022-06-24T16:44:41" | MEMBER | null | Support streaming allocine dataset.
Fix #4562. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4563/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4563/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4563",
"html_url": "https://github.com/huggingface/datasets/pull/4563",
"diff_url": "https://github.com/huggingface/datasets/pull/4563.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4563.patch",
"merged_at": "2022-06-24T16:44:41"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4562 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4562/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4562/comments | https://api.github.com/repos/huggingface/datasets/issues/4562/events | https://github.com/huggingface/datasets/issues/4562 | 1,283,779,557 | I_kwDODunzps5MhOvl | 4,562 | Dataset Viewer issue for allocine | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 3470211881,
"node_id": "LA_kwDODunzps7O1zsp",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer",
"name": "dataset-viewer",
"color": "E5583E",
"default": false,
"description": "Related to the dataset viewer on huggingface.co"
}
] | closed | false | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"I removed my assignment as @huggingface/datasets should be able to answer better than me\r\n",
"Let me have a look...",
"Thanks for the quick fix @albertvillanova ",
"Note that the underlying issue is that datasets containing TAR files are not streamable out of the box: they need being iterated with `dl_manager.iter_archive` to avoid performance issues because they access their file content *sequentially* (no random access).",
"> Note that the underlying issue is that datasets containing TAR files are not streamable out of the box: they need being iterated with `dl_manager.iter_archive` to avoid performance issues because they access their file content _sequentially_ (no random access).\r\n\r\nAh thanks for the clarification! I'll look out for this next time and implement the fix myself :)"
] | "2022-06-24T13:50:38" | "2022-06-27T06:39:32" | "2022-06-24T16:44:41" | MEMBER | null | ### Link
https://huggingface.co/datasets/allocine
### Description
Not sure if this is a problem with `bz2` compression, but I thought these datasets could be streamed:
```
Status code: 400
Exception: AttributeError
Message: 'TarContainedFile' object has no attribute 'readable'
```
### Owner
No | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4562/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4562/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4561 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4561/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4561/comments | https://api.github.com/repos/huggingface/datasets/issues/4561/events | https://github.com/huggingface/datasets/pull/4561 | 1,283,624,242 | PR_kwDODunzps46TnVe | 4,561 | Add evaluation data to acronym_identification | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-24T11:17:33" | "2022-06-27T09:37:55" | "2022-06-27T08:49:22" | MEMBER | null | null | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4561/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4561/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4561",
"html_url": "https://github.com/huggingface/datasets/pull/4561",
"diff_url": "https://github.com/huggingface/datasets/pull/4561.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4561.patch",
"merged_at": "2022-06-27T08:49:22"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4560 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4560/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4560/comments | https://api.github.com/repos/huggingface/datasets/issues/4560/events | https://github.com/huggingface/datasets/pull/4560 | 1,283,558,873 | PR_kwDODunzps46TY9n | 4,560 | Add evaluation metadata to imagenet-1k | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 4564477500,
"node_id": "LA_kwDODunzps8AAAABEBBmPA",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20contribution",
"name": "dataset contribution",
"color": "0e8a16",
"default": false,
"description": "Contribution to a dataset script"
}
] | closed | false | null | [] | null | [] | "2022-06-24T10:12:41" | "2022-09-23T09:39:53" | "2022-09-23T09:37:03" | MEMBER | null | null | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4560/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4560/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4560",
"html_url": "https://github.com/huggingface/datasets/pull/4560",
"diff_url": "https://github.com/huggingface/datasets/pull/4560.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4560.patch",
"merged_at": null
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4559 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4559/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4559/comments | https://api.github.com/repos/huggingface/datasets/issues/4559/events | https://github.com/huggingface/datasets/pull/4559 | 1,283,544,937 | PR_kwDODunzps46TV7- | 4,559 | Add action names in schema_guided_dstc8 dataset card | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-24T10:00:01" | "2022-06-24T10:54:28" | "2022-06-24T10:43:47" | MEMBER | null | As aseked in https://huggingface.co/datasets/schema_guided_dstc8/discussions/1, I added the action names in the dataset card | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4559/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4559/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4559",
"html_url": "https://github.com/huggingface/datasets/pull/4559",
"diff_url": "https://github.com/huggingface/datasets/pull/4559.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4559.patch",
"merged_at": "2022-06-24T10:43:47"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4558 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4558/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4558/comments | https://api.github.com/repos/huggingface/datasets/issues/4558/events | https://github.com/huggingface/datasets/pull/4558 | 1,283,479,650 | PR_kwDODunzps46THl_ | 4,558 | Add evaluation metadata to wmt14 | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 4564477500,
"node_id": "LA_kwDODunzps8AAAABEBBmPA",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20contribution",
"name": "dataset contribution",
"color": "0e8a16",
"default": false,
"description": "Contribution to a dataset script"
}
] | closed | false | null | [] | null | [] | "2022-06-24T09:08:54" | "2022-09-23T09:36:50" | "2022-09-23T09:36:50" | MEMBER | null | null | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4558/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4558/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4558",
"html_url": "https://github.com/huggingface/datasets/pull/4558",
"diff_url": "https://github.com/huggingface/datasets/pull/4558.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4558.patch",
"merged_at": null
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4557 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4557/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4557/comments | https://api.github.com/repos/huggingface/datasets/issues/4557/events | https://github.com/huggingface/datasets/pull/4557 | 1,283,473,889 | PR_kwDODunzps46TGZK | 4,557 | Add evaluation metadata to wmt16 | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 4564477500,
"node_id": "LA_kwDODunzps8AAAABEBBmPA",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20contribution",
"name": "dataset contribution",
"color": "0e8a16",
"default": false,
"description": "Contribution to a dataset script"
}
] | closed | false | null | [] | null | [] | "2022-06-24T09:04:23" | "2022-09-23T09:36:32" | "2022-09-23T09:36:32" | MEMBER | null | Just to confirm: we should add this metadata via GitHub and not Hub PRs for canonical datasets right? | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4557/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4557/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4557",
"html_url": "https://github.com/huggingface/datasets/pull/4557",
"diff_url": "https://github.com/huggingface/datasets/pull/4557.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4557.patch",
"merged_at": null
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4556 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4556/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4556/comments | https://api.github.com/repos/huggingface/datasets/issues/4556/events | https://github.com/huggingface/datasets/issues/4556 | 1,283,462,881 | I_kwDODunzps5MgBbh | 4,556 | Dataset Viewer issue for conll2003 | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 3470211881,
"node_id": "LA_kwDODunzps7O1zsp",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer",
"name": "dataset-viewer",
"color": "E5583E",
"default": false,
"description": "Related to the dataset viewer on huggingface.co"
}
] | closed | false | {
"login": "severo",
"id": 1676121,
"node_id": "MDQ6VXNlcjE2NzYxMjE=",
"avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/severo",
"html_url": "https://github.com/severo",
"followers_url": "https://api.github.com/users/severo/followers",
"following_url": "https://api.github.com/users/severo/following{/other_user}",
"gists_url": "https://api.github.com/users/severo/gists{/gist_id}",
"starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/severo/subscriptions",
"organizations_url": "https://api.github.com/users/severo/orgs",
"repos_url": "https://api.github.com/users/severo/repos",
"events_url": "https://api.github.com/users/severo/events{/privacy}",
"received_events_url": "https://api.github.com/users/severo/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "severo",
"id": 1676121,
"node_id": "MDQ6VXNlcjE2NzYxMjE=",
"avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/severo",
"html_url": "https://github.com/severo",
"followers_url": "https://api.github.com/users/severo/followers",
"following_url": "https://api.github.com/users/severo/following{/other_user}",
"gists_url": "https://api.github.com/users/severo/gists{/gist_id}",
"starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/severo/subscriptions",
"organizations_url": "https://api.github.com/users/severo/orgs",
"repos_url": "https://api.github.com/users/severo/repos",
"events_url": "https://api.github.com/users/severo/events{/privacy}",
"received_events_url": "https://api.github.com/users/severo/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"Fixed, thanks."
] | "2022-06-24T08:55:18" | "2022-06-24T09:50:39" | "2022-06-24T09:50:39" | MEMBER | null | ### Link
https://huggingface.co/datasets/conll2003/viewer/conll2003/test
### Description
Seems like a cache problem with this config / split:
```
Server error
Status code: 400
Exception: FileNotFoundError
Message: [Errno 2] No such file or directory: '/cache/modules/datasets_modules/datasets/conll2003/__init__.py'
```
### Owner
No | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4556/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4556/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4555 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4555/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4555/comments | https://api.github.com/repos/huggingface/datasets/issues/4555/events | https://github.com/huggingface/datasets/issues/4555 | 1,283,451,651 | I_kwDODunzps5Mf-sD | 4,555 | Dataset Viewer issue for xtreme | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 3470211881,
"node_id": "LA_kwDODunzps7O1zsp",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer",
"name": "dataset-viewer",
"color": "E5583E",
"default": false,
"description": "Related to the dataset viewer on huggingface.co"
}
] | closed | false | {
"login": "severo",
"id": 1676121,
"node_id": "MDQ6VXNlcjE2NzYxMjE=",
"avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/severo",
"html_url": "https://github.com/severo",
"followers_url": "https://api.github.com/users/severo/followers",
"following_url": "https://api.github.com/users/severo/following{/other_user}",
"gists_url": "https://api.github.com/users/severo/gists{/gist_id}",
"starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/severo/subscriptions",
"organizations_url": "https://api.github.com/users/severo/orgs",
"repos_url": "https://api.github.com/users/severo/repos",
"events_url": "https://api.github.com/users/severo/events{/privacy}",
"received_events_url": "https://api.github.com/users/severo/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "severo",
"id": 1676121,
"node_id": "MDQ6VXNlcjE2NzYxMjE=",
"avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/severo",
"html_url": "https://github.com/severo",
"followers_url": "https://api.github.com/users/severo/followers",
"following_url": "https://api.github.com/users/severo/following{/other_user}",
"gists_url": "https://api.github.com/users/severo/gists{/gist_id}",
"starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/severo/subscriptions",
"organizations_url": "https://api.github.com/users/severo/orgs",
"repos_url": "https://api.github.com/users/severo/repos",
"events_url": "https://api.github.com/users/severo/events{/privacy}",
"received_events_url": "https://api.github.com/users/severo/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"Fixed, thanks."
] | "2022-06-24T08:46:08" | "2022-06-24T09:50:45" | "2022-06-24T09:50:45" | MEMBER | null | ### Link
https://huggingface.co/datasets/xtreme/viewer/PAN-X.de/test
### Description
There seems to be a problem with the cache of this config / split:
```
Server error
Status code: 400
Exception: FileNotFoundError
Message: [Errno 2] No such file or directory: '/cache/modules/datasets_modules/datasets/xtreme/349258adc25bb45e47de193222f95e68a44f7a7ab53c4283b3f007208a11bf7e/xtreme.py'
```
### Owner
No | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4555/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4555/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4554 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4554/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4554/comments | https://api.github.com/repos/huggingface/datasets/issues/4554/events | https://github.com/huggingface/datasets/pull/4554 | 1,283,369,453 | PR_kwDODunzps46Sv_f | 4,554 | Fix WMT dataset loading issue and docs update (Re-opened) | {
"login": "khushmeeet",
"id": 8711912,
"node_id": "MDQ6VXNlcjg3MTE5MTI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8711912?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/khushmeeet",
"html_url": "https://github.com/khushmeeet",
"followers_url": "https://api.github.com/users/khushmeeet/followers",
"following_url": "https://api.github.com/users/khushmeeet/following{/other_user}",
"gists_url": "https://api.github.com/users/khushmeeet/gists{/gist_id}",
"starred_url": "https://api.github.com/users/khushmeeet/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/khushmeeet/subscriptions",
"organizations_url": "https://api.github.com/users/khushmeeet/orgs",
"repos_url": "https://api.github.com/users/khushmeeet/repos",
"events_url": "https://api.github.com/users/khushmeeet/events{/privacy}",
"received_events_url": "https://api.github.com/users/khushmeeet/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-24T07:26:16" | "2022-07-08T15:39:20" | "2022-07-08T15:27:44" | CONTRIBUTOR | null | This PR is a fix for #4354
Changes are made for `wmt14`, `wmt15`, `wmt16`, `wmt17`, `wmt18`, `wmt19` and `wmt_t2t`, and the READMEs are updated for the corresponding datasets.
Let me know if any additional changes are required.
Thanks | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4554/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4554/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4554",
"html_url": "https://github.com/huggingface/datasets/pull/4554",
"diff_url": "https://github.com/huggingface/datasets/pull/4554.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4554.patch",
"merged_at": "2022-07-08T15:27:44"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4553 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4553/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4553/comments | https://api.github.com/repos/huggingface/datasets/issues/4553/events | https://github.com/huggingface/datasets/pull/4553 | 1,282,779,560 | PR_kwDODunzps46Q1q7 | 4,553 | Stop dropping columns in to_tf_dataset() before we load batches | {
"login": "Rocketknight1",
"id": 12866554,
"node_id": "MDQ6VXNlcjEyODY2NTU0",
"avatar_url": "https://avatars.githubusercontent.com/u/12866554?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Rocketknight1",
"html_url": "https://github.com/Rocketknight1",
"followers_url": "https://api.github.com/users/Rocketknight1/followers",
"following_url": "https://api.github.com/users/Rocketknight1/following{/other_user}",
"gists_url": "https://api.github.com/users/Rocketknight1/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Rocketknight1/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Rocketknight1/subscriptions",
"organizations_url": "https://api.github.com/users/Rocketknight1/orgs",
"repos_url": "https://api.github.com/users/Rocketknight1/repos",
"events_url": "https://api.github.com/users/Rocketknight1/events{/privacy}",
"received_events_url": "https://api.github.com/users/Rocketknight1/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-23T18:21:05" | "2022-07-04T19:00:13" | "2022-07-04T18:49:01" | MEMBER | null | `to_tf_dataset()` dropped unnecessary columns before loading batches from the dataset, but this is causing problems when using a transform, because the dropped columns might be needed to compute the transform. Since there's no real way to check which columns the transform might need, we skip dropping columns and instead drop keys from the batch after we load it.
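To make the intent concrete, here is a rough sketch of the kind of usage this unblocks; the dataset name, transform and column names are only illustrative, not taken from the linked notebook:
```python
import numpy as np
from datasets import load_dataset

ds = load_dataset("rotten_tomatoes", split="train")  # any dataset with a "text" column

def dummy_tokenize(batch):
    # Stand-in for a real tokenizer: the transform reads "text",
    # which is not one of the columns passed to the model.
    batch["input_ids"] = [np.arange(8, dtype=np.int64) for _ in batch["text"]]
    return batch

ds = ds.with_transform(dummy_tokenize)

# Columns are now only filtered after each batch is loaded, so "text" is still
# available when the transform runs, and only "input_ids"/"label" reach the model.
tf_ds = ds.to_tf_dataset(
    columns=["input_ids"],
    label_cols=["label"],
    batch_size=8,
    shuffle=False,
)
```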
cc @amyeroberts and https://github.com/huggingface/notebooks/pull/202 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4553/reactions",
"total_count": 1,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 1,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4553/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4553",
"html_url": "https://github.com/huggingface/datasets/pull/4553",
"diff_url": "https://github.com/huggingface/datasets/pull/4553.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4553.patch",
"merged_at": "2022-07-04T18:49:01"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4552 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4552/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4552/comments | https://api.github.com/repos/huggingface/datasets/issues/4552/events | https://github.com/huggingface/datasets/pull/4552 | 1,282,615,646 | PR_kwDODunzps46QSHV | 4,552 | Tell users to upload on the hub directly | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-23T15:47:52" | "2022-06-26T15:49:46" | "2022-06-26T15:39:11" | MEMBER | null | As noted in https://github.com/huggingface/datasets/pull/4534, it is still not clear that it is recommended to add datasets on the Hugging Face Hub directly instead of GitHub, so I updated some docs.
Moreover, since users won't be able to get reviews from us on the Hub, I added a paragraph telling users that they can open a discussion and tag the `datasets` maintainers for reviews.
Finally, I removed the _previous good reasons_ to add a dataset on GitHub, keeping only this one:
> In some rare cases it makes more sense to open a PR on GitHub. For example when you are not the author of the dataset and there is no clear organization / namespace that you can put the dataset under.
Does it sound good to you @albertvillanova @julien-c ? | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4552/reactions",
"total_count": 3,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 3,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4552/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4552",
"html_url": "https://github.com/huggingface/datasets/pull/4552",
"diff_url": "https://github.com/huggingface/datasets/pull/4552.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4552.patch",
"merged_at": "2022-06-26T15:39:11"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4551 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4551/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4551/comments | https://api.github.com/repos/huggingface/datasets/issues/4551/events | https://github.com/huggingface/datasets/pull/4551 | 1,282,534,807 | PR_kwDODunzps46QAV- | 4,551 | Perform hidden file check on relative data file path | {
"login": "mariosasko",
"id": 47462742,
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariosasko",
"html_url": "https://github.com/mariosasko",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-23T14:49:11" | "2022-06-30T14:49:20" | "2022-06-30T14:38:18" | CONTRIBUTOR | null | Fix #4549 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4551/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4551/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4551",
"html_url": "https://github.com/huggingface/datasets/pull/4551",
"diff_url": "https://github.com/huggingface/datasets/pull/4551.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4551.patch",
"merged_at": "2022-06-30T14:38:18"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4550 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4550/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4550/comments | https://api.github.com/repos/huggingface/datasets/issues/4550/events | https://github.com/huggingface/datasets/issues/4550 | 1,282,374,441 | I_kwDODunzps5Mb3sp | 4,550 | imdb source error | {
"login": "Muhtasham",
"id": 20128202,
"node_id": "MDQ6VXNlcjIwMTI4MjAy",
"avatar_url": "https://avatars.githubusercontent.com/u/20128202?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Muhtasham",
"html_url": "https://github.com/Muhtasham",
"followers_url": "https://api.github.com/users/Muhtasham/followers",
"following_url": "https://api.github.com/users/Muhtasham/following{/other_user}",
"gists_url": "https://api.github.com/users/Muhtasham/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Muhtasham/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Muhtasham/subscriptions",
"organizations_url": "https://api.github.com/users/Muhtasham/orgs",
"repos_url": "https://api.github.com/users/Muhtasham/repos",
"events_url": "https://api.github.com/users/Muhtasham/events{/privacy}",
"received_events_url": "https://api.github.com/users/Muhtasham/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | [
"Thanks for reporting, @Muhtasham.\r\n\r\nIndeed IMDB dataset is not accessible from yesterday, because the data is hosted on the data owners servers at Stanford (http://ai.stanford.edu/) and these are down due to a power outage originated by a fire: https://twitter.com/StanfordAILab/status/1539472302399623170?s=20&t=1HU1hrtaXprtn14U61P55w\r\n\r\nAs a temporary workaroud, you can load the IMDB dataset with this tweak:\r\n```python\r\nds = load_dataset(\"imdb\", revision=\"tmp-fix-imdb\")\r\n```\r\n"
] | "2022-06-23T13:02:52" | "2022-06-23T13:47:05" | "2022-06-23T13:47:04" | NONE | null | ## Describe the bug
imdb dataset not loading
## Steps to reproduce the bug
```python
from datasets import load_dataset
dataset = load_dataset("imdb")
```
## Expected results
## Actual results
```bash
06/23/2022 14:45:18 - INFO - datasets.builder - Dataset not on Hf google storage. Downloading and preparing it from source
06/23/2022 14:46:34 - INFO - datasets.utils.file_utils - HEAD request to http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz timed out, retrying... [1.0]
.....
ConnectionError: Couldn't reach http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz (ConnectTimeout(MaxRetryError("HTTPConnectionPool(host='ai.stanford.edu', port=80): Max retries exceeded with url: /~amaas/data/sentiment/aclImdb_v1.tar.gz (Caused by ConnectTimeoutError(<urllib3.connection.HTTPConnection object at 0x7f2d750cf690>, 'Connection to ai.stanford.edu timed out. (connect timeout=100)'))")))
```
## Environment info
- `datasets` version: 2.3.2
- Platform: Linux-5.4.188+-x86_64-with-Ubuntu-18.04-bionic
- Python version: 3.7.13
- PyArrow version: 6.0.1
- Pandas version: 1.3.5
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4550/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4550/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4549 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4549/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4549/comments | https://api.github.com/repos/huggingface/datasets/issues/4549/events | https://github.com/huggingface/datasets/issues/4549 | 1,282,312,975 | I_kwDODunzps5MbosP | 4,549 | FileNotFoundError when passing a data_file inside a directory starting with double underscores | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | {
"login": "mariosasko",
"id": 47462742,
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariosasko",
"html_url": "https://github.com/mariosasko",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "mariosasko",
"id": 47462742,
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariosasko",
"html_url": "https://github.com/mariosasko",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"I have consistently experienced this bug on GitHub actions when bumping to `2.3.2`",
"We're working on a fix ;)"
] | "2022-06-23T12:19:24" | "2022-06-30T14:38:18" | "2022-06-30T14:38:18" | MEMBER | null | Bug experienced in the `accelerate` CI: https://github.com/huggingface/accelerate/runs/7016055148?check_suite_focus=true
This is related to https://github.com/huggingface/datasets/pull/4505 and the changes from https://github.com/huggingface/datasets/pull/4412 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4549/reactions",
"total_count": 2,
"+1": 2,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4549/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4548 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4548/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4548/comments | https://api.github.com/repos/huggingface/datasets/issues/4548/events | https://github.com/huggingface/datasets/issues/4548 | 1,282,218,096 | I_kwDODunzps5MbRhw | 4,548 | Metadata.jsonl for Imagefolder is ignored if it's in a parent directory to the splits directories/do not have "{split}_" prefix | {
"login": "polinaeterna",
"id": 16348744,
"node_id": "MDQ6VXNlcjE2MzQ4NzQ0",
"avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/polinaeterna",
"html_url": "https://github.com/polinaeterna",
"followers_url": "https://api.github.com/users/polinaeterna/followers",
"following_url": "https://api.github.com/users/polinaeterna/following{/other_user}",
"gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}",
"starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions",
"organizations_url": "https://api.github.com/users/polinaeterna/orgs",
"repos_url": "https://api.github.com/users/polinaeterna/repos",
"events_url": "https://api.github.com/users/polinaeterna/events{/privacy}",
"received_events_url": "https://api.github.com/users/polinaeterna/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | {
"login": "mariosasko",
"id": 47462742,
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariosasko",
"html_url": "https://github.com/mariosasko",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "mariosasko",
"id": 47462742,
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariosasko",
"html_url": "https://github.com/mariosasko",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"I agree it would be nice to support this. It doesn't fit really well in the current data_files.py, where files of each splits are separated in different folder though, maybe we have to modify a bit the logic here. \r\n\r\nOne idea would be to extend `get_patterns_in_dataset_repository` and `get_patterns_locally` to additionally check for `metadata.json`, but feel free to comment if you have better ideas (I feel like we're reaching the limits of what the current implementation IMO, so we could think of a different way of resolving the data files if necessary)"
] | "2022-06-23T10:58:57" | "2022-06-30T10:15:32" | "2022-06-30T10:15:32" | CONTRIBUTOR | null | If data contains a single `metadata.jsonl` file for several splits, it won't be included in a dataset's `data_files` and therefore ignored.
This happens when a directory is structured as follows:
```
train/
file_1.jpg
file_2.jpg
test/
file_3.jpg
file_4.jpg
metadata.jsonl
```
or as follows:
```
train_file_1.jpg
train_file_2.jpg
test_file_3.jpg
test_file_4.jpg
metadata.jsonl
```
The same applies to HF repos, because the file is ignored by the patterns [here](https://github.com/huggingface/datasets/blob/master/src/datasets/data_files.py#L29).
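To make the failure mode concrete, a minimal sketch; the path is only a placeholder:
```python
from datasets import load_dataset

# Layout as above: train/ and test/ image folders plus one top-level metadata.jsonl
ds = load_dataset("imagefolder", data_dir="path/to/data")

# Because the shared metadata.jsonl does not match any per-split data file pattern,
# the extra columns it defines are silently missing from the loaded splits.
print(ds["train"].features)
```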
@lhoestq @mariosasko Do you think it's better to add this functionality in `data_files.py` or just specifically in the imagefolder/audiofolder code? Putting it in `data_files.py` would be more general, but I don't know if there are any other cases where that might be needed.
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4548/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4548/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4547 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4547/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4547/comments | https://api.github.com/repos/huggingface/datasets/issues/4547/events | https://github.com/huggingface/datasets/pull/4547 | 1,282,160,517 | PR_kwDODunzps46Ot5u | 4,547 | [CI] Fix some warnings | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-23T10:10:49" | "2022-06-28T14:10:57" | "2022-06-28T13:59:54" | MEMBER | null | There are some warnings in the CI that are annoying, I tried to remove most of them | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4547/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4547/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4547",
"html_url": "https://github.com/huggingface/datasets/pull/4547",
"diff_url": "https://github.com/huggingface/datasets/pull/4547.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4547.patch",
"merged_at": "2022-06-28T13:59:54"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4546 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4546/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4546/comments | https://api.github.com/repos/huggingface/datasets/issues/4546/events | https://github.com/huggingface/datasets/pull/4546 | 1,282,093,288 | PR_kwDODunzps46Oe_K | 4,546 | [CI] fixing seqeval install in ci by pinning setuptools-scm | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-23T09:24:37" | "2022-06-23T10:24:16" | "2022-06-23T10:13:44" | MEMBER | null | The latest setuptools-scm version supported on 3.6 is 6.4.2. However for some reason circleci has version 7, which doesn't work.
I fixed this by pinning the version of setuptools-scm in the CircleCI job.
Fix https://github.com/huggingface/datasets/issues/4544 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4546/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4546/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4546",
"html_url": "https://github.com/huggingface/datasets/pull/4546",
"diff_url": "https://github.com/huggingface/datasets/pull/4546.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4546.patch",
"merged_at": "2022-06-23T10:13:44"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4545 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4545/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4545/comments | https://api.github.com/repos/huggingface/datasets/issues/4545/events | https://github.com/huggingface/datasets/pull/4545 | 1,280,899,028 | PR_kwDODunzps46KV-y | 4,545 | Make DuplicateKeysError more user friendly [For Issue #2556] | {
"login": "VijayKalmath",
"id": 20517962,
"node_id": "MDQ6VXNlcjIwNTE3OTYy",
"avatar_url": "https://avatars.githubusercontent.com/u/20517962?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/VijayKalmath",
"html_url": "https://github.com/VijayKalmath",
"followers_url": "https://api.github.com/users/VijayKalmath/followers",
"following_url": "https://api.github.com/users/VijayKalmath/following{/other_user}",
"gists_url": "https://api.github.com/users/VijayKalmath/gists{/gist_id}",
"starred_url": "https://api.github.com/users/VijayKalmath/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/VijayKalmath/subscriptions",
"organizations_url": "https://api.github.com/users/VijayKalmath/orgs",
"repos_url": "https://api.github.com/users/VijayKalmath/repos",
"events_url": "https://api.github.com/users/VijayKalmath/events{/privacy}",
"received_events_url": "https://api.github.com/users/VijayKalmath/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-22T21:01:34" | "2022-06-28T09:37:06" | "2022-06-28T09:26:04" | CONTRIBUTOR | null | # What does this PR do?
## Summary
*The DuplicateKeysError does not provide any information about the examples that share the same key.*
*This information is very helpful for debugging the dataset generator script.*
## Additions
-
## Changes
- Changed `DuplicateKeysError Class` in `src/datasets/keyhash.py` to add current index and duplicate_key_indices to error message.
- Changed `check_duplicate_keys` function in `src/datasets/arrow_writer.py` to find indices of examples with duplicate hash if duplicate keys are found.
## Deletions
-
## To do :
- [x] Find a way to locate and print the path `<Path to Dataset>` in the error message
## Issues Addressed :
Fixes #2556 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4545/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4545/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4545",
"html_url": "https://github.com/huggingface/datasets/pull/4545",
"diff_url": "https://github.com/huggingface/datasets/pull/4545.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4545.patch",
"merged_at": "2022-06-28T09:26:04"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4544 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4544/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4544/comments | https://api.github.com/repos/huggingface/datasets/issues/4544/events | https://github.com/huggingface/datasets/issues/4544 | 1,280,500,340 | I_kwDODunzps5MUuJ0 | 4,544 | [CI] seqeval installation fails sometimes on python 3.6 | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
}
] | null | [] | "2022-06-22T16:35:23" | "2022-06-23T10:13:44" | "2022-06-23T10:13:44" | MEMBER | null | The CI sometimes fails to install seqeval, which cause the `seqeval` metric tests to fail.
The installation fails because of this error:
```
Collecting seqeval
Downloading seqeval-1.2.2.tar.gz (43 kB)
     |████████                        | 10 kB 42.1 MB/s eta 0:00:01
     |███████████████                 | 20 kB 53.3 MB/s eta 0:00:01
     |███████████████████████         | 30 kB 67.2 MB/s eta 0:00:01
     |██████████████████████████████  | 40 kB 76.1 MB/s eta 0:00:01
     |████████████████████████████████| 43 kB 10.0 MB/s
Preparing metadata (setup.py) ... - error
ERROR: Command errored out with exit status 1:
command: /home/circleci/.pyenv/versions/3.6.15/bin/python3.6 -c 'import io, os, sys, setuptools, tokenize; sys.argv[0] = '"'"'/tmp/pip-install-1l96tbyj/seqeval_b31086f711d84743abe6905d2aa9dade/setup.py'"'"'; __file__='"'"'/tmp/pip-install-1l96tbyj/seqeval_b31086f711d84743abe6905d2aa9dade/setup.py'"'"';f = getattr(tokenize, '"'"'open'"'"', open)(__file__) if os.path.exists(__file__) else io.StringIO('"'"'from setuptools import setup; setup()'"'"');code = f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' egg_info --egg-base /tmp/pip-pip-egg-info-pf54_vqy
cwd: /tmp/pip-install-1l96tbyj/seqeval_b31086f711d84743abe6905d2aa9dade/
Complete output (22 lines):
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/tmp/pip-install-1l96tbyj/seqeval_b31086f711d84743abe6905d2aa9dade/setup.py", line 56, in <module>
'Programming Language :: Python :: Implementation :: PyPy'
File "/home/circleci/.pyenv/versions/3.6.15/lib/python3.6/site-packages/setuptools/__init__.py", line 143, in setup
return distutils.core.setup(**attrs)
File "/home/circleci/.pyenv/versions/3.6.15/lib/python3.6/distutils/core.py", line 108, in setup
_setup_distribution = dist = klass(attrs)
File "/home/circleci/.pyenv/versions/3.6.15/lib/python3.6/site-packages/setuptools/dist.py", line 442, in __init__
k: v for k, v in attrs.items()
File "/home/circleci/.pyenv/versions/3.6.15/lib/python3.6/distutils/dist.py", line 281, in __init__
self.finalize_options()
File "/home/circleci/.pyenv/versions/3.6.15/lib/python3.6/site-packages/setuptools/dist.py", line 601, in finalize_options
ep.load()(self, ep.name, value)
File "/home/circleci/.pyenv/versions/3.6.15/lib/python3.6/site-packages/pkg_resources/__init__.py", line 2346, in load
return self.resolve()
File "/home/circleci/.pyenv/versions/3.6.15/lib/python3.6/site-packages/pkg_resources/__init__.py", line 2352, in resolve
module = __import__(self.module_name, fromlist=['__name__'], level=0)
File "/tmp/pip-install-1l96tbyj/seqeval_b31086f711d84743abe6905d2aa9dade/.eggs/setuptools_scm-7.0.2-py3.6.egg/setuptools_scm/__init__.py", line 5
from __future__ import annotations
^
SyntaxError: future feature annotations is not defined
----------------------------------------
WARNING: Discarding https://files.pythonhosted.org/packages/9d/2d/233c79d5b4e5ab1dbf111242299153f3caddddbb691219f363ad55ce783d/seqeval-1.2.2.tar.gz#sha256=f28e97c3ab96d6fcd32b648f6438ff2e09cfba87f05939da9b3970713ec56e6f (from https://pypi.org/simple/seqeval/). Command errored out with exit status 1: python setup.py egg_info Check the logs for full command output.
```
for example in https://app.circleci.com/pipelines/github/huggingface/datasets/12665/workflows/93878eb9-a923-4b35-b2e7-c5e9b22f10ad/jobs/75300
Here is a diff of the pip install logs until the error is reached: https://www.diffchecker.com/VkQDLeQT
This could be caused by the latest updates of setuptools-scm | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4544/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4544/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4543 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4543/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4543/comments | https://api.github.com/repos/huggingface/datasets/issues/4543/events | https://github.com/huggingface/datasets/pull/4543 | 1,280,379,781 | PR_kwDODunzps46IiEp | 4,543 | [CI] Fix upstream hub test url | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-22T15:34:27" | "2022-06-22T16:37:40" | "2022-06-22T16:27:37" | MEMBER | null | Some tests were still using moon-staging instead of hub-ci.
I also updated the token to use one dedicated to `datasets` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4543/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4543/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4543",
"html_url": "https://github.com/huggingface/datasets/pull/4543",
"diff_url": "https://github.com/huggingface/datasets/pull/4543.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4543.patch",
"merged_at": "2022-06-22T16:27:37"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4541 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4541/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4541/comments | https://api.github.com/repos/huggingface/datasets/issues/4541/events | https://github.com/huggingface/datasets/pull/4541 | 1,280,161,436 | PR_kwDODunzps46HyPK | 4,541 | Fix timestamp conversion from Pandas to Python datetime in streaming mode | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-22T13:40:01" | "2022-06-22T16:39:27" | "2022-06-22T16:29:09" | MEMBER | null | Arrow accepts both pd.Timestamp and datetime.datetime objects to create timestamp arrays.
However a timestamp array is always converted to datetime.datetime objects.
This created an inconsistency between streaming and non-streaming: e.g. the `ett` dataset outputs datetime.datetime objects in non-streaming mode but pd.Timestamp in streaming mode.
I fixed this by always converting pd.Timestamp to datetime.datetime during the example encoding step.
I fixed the same issue for pd.Timedelta as well. Finally I added an extra step of conversion for Series and DataFrame to take this into account in case such data are passed as Series or DataFrame.
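For illustration, a rough sketch of the kind of conversion this adds; the function name and placement are made up, not the actual implementation:
```python
import datetime
import pandas as pd

def encode_temporal_value(value):
    # Sketch: normalize pandas temporal scalars to plain Python objects
    # before they are handed to Arrow, so streaming and non-streaming
    # return the same types.
    if isinstance(value, pd.Timestamp):
        return value.to_pydatetime()
    if isinstance(value, pd.Timedelta):
        return value.to_pytimedelta()
    return value

converted = encode_temporal_value(pd.Timestamp("2016-07-01 00:00:00"))
assert type(converted) is datetime.datetime
```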
Fix https://github.com/huggingface/datasets/issues/4533
Related to https://github.com/huggingface/datasets-server/issues/397 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4541/reactions",
"total_count": 1,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 1,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4541/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4541",
"html_url": "https://github.com/huggingface/datasets/pull/4541",
"diff_url": "https://github.com/huggingface/datasets/pull/4541.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4541.patch",
"merged_at": "2022-06-22T16:29:09"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4540 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4540/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4540/comments | https://api.github.com/repos/huggingface/datasets/issues/4540/events | https://github.com/huggingface/datasets/issues/4540 | 1,280,142,942 | I_kwDODunzps5MTW5e | 4,540 | Avoid splitting by` .py` for the file. | {
"login": "espoirMur",
"id": 18573157,
"node_id": "MDQ6VXNlcjE4NTczMTU3",
"avatar_url": "https://avatars.githubusercontent.com/u/18573157?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/espoirMur",
"html_url": "https://github.com/espoirMur",
"followers_url": "https://api.github.com/users/espoirMur/followers",
"following_url": "https://api.github.com/users/espoirMur/following{/other_user}",
"gists_url": "https://api.github.com/users/espoirMur/gists{/gist_id}",
"starred_url": "https://api.github.com/users/espoirMur/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/espoirMur/subscriptions",
"organizations_url": "https://api.github.com/users/espoirMur/orgs",
"repos_url": "https://api.github.com/users/espoirMur/repos",
"events_url": "https://api.github.com/users/espoirMur/events{/privacy}",
"received_events_url": "https://api.github.com/users/espoirMur/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892877,
"node_id": "MDU6TGFiZWwxOTM1ODkyODc3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue",
"name": "good first issue",
"color": "7057ff",
"default": true,
"description": "Good for newcomers"
}
] | closed | false | {
"login": "VijayKalmath",
"id": 20517962,
"node_id": "MDQ6VXNlcjIwNTE3OTYy",
"avatar_url": "https://avatars.githubusercontent.com/u/20517962?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/VijayKalmath",
"html_url": "https://github.com/VijayKalmath",
"followers_url": "https://api.github.com/users/VijayKalmath/followers",
"following_url": "https://api.github.com/users/VijayKalmath/following{/other_user}",
"gists_url": "https://api.github.com/users/VijayKalmath/gists{/gist_id}",
"starred_url": "https://api.github.com/users/VijayKalmath/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/VijayKalmath/subscriptions",
"organizations_url": "https://api.github.com/users/VijayKalmath/orgs",
"repos_url": "https://api.github.com/users/VijayKalmath/repos",
"events_url": "https://api.github.com/users/VijayKalmath/events{/privacy}",
"received_events_url": "https://api.github.com/users/VijayKalmath/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "VijayKalmath",
"id": 20517962,
"node_id": "MDQ6VXNlcjIwNTE3OTYy",
"avatar_url": "https://avatars.githubusercontent.com/u/20517962?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/VijayKalmath",
"html_url": "https://github.com/VijayKalmath",
"followers_url": "https://api.github.com/users/VijayKalmath/followers",
"following_url": "https://api.github.com/users/VijayKalmath/following{/other_user}",
"gists_url": "https://api.github.com/users/VijayKalmath/gists{/gist_id}",
"starred_url": "https://api.github.com/users/VijayKalmath/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/VijayKalmath/subscriptions",
"organizations_url": "https://api.github.com/users/VijayKalmath/orgs",
"repos_url": "https://api.github.com/users/VijayKalmath/repos",
"events_url": "https://api.github.com/users/VijayKalmath/events{/privacy}",
"received_events_url": "https://api.github.com/users/VijayKalmath/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"Hi @espoirMur, thanks for reporting.\r\n\r\nYou are right: that code line could be improved and made more generically valid.\r\n\r\nOn the other hand, I would suggest using `os.path.splitext` instead.\r\n\r\nAre you willing to open a PR? :)",
"I will have a look.. \r\n\r\nThis weekend .. ",
"@albertvillanova , Can you have a look at #4590. \r\n\r\nThanks ",
"#self-assign"
] | "2022-06-22T13:26:55" | "2022-07-07T13:17:44" | "2022-07-07T13:17:44" | NONE | null | https://github.com/huggingface/datasets/blob/90b3a98065556fc66380cafd780af9b1814b9426/src/datasets/load.py#L272
Hello,
Thank you for this library.
I was using it and I hit one edge case: my home folder name ends with `.py` (it is `/home/espoir.py`), so anytime I run the code to load a local module, the code here fails because after splitting it tries to save the code to my home directory.
Steps to reproduce:
- If you have a home folder whose name ends with `.py`
- load a module from a local folder
`qa_dataset = load_dataset("src/data/build_qa_dataset.py")`
it fails
A possible workaround would be to use pathlib at the mentioned line:
`meta_path = Path(importable_local_file).parent.joinpath("metadata.json")`; this can alleviate the issue.
Let me know what your thoughts are on this, and I can try to fix it with a PR.
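For illustration, a minimal sketch of the failure mode and of the two suggested alternatives (the path below is hypothetical, and `importable_local_file` is borrowed from `load.py`):
```python
import os
from pathlib import Path

importable_local_file = "/home/espoir.py/src/data/build_qa_dataset.py"  # hypothetical path

# Splitting on ".py" breaks as soon as a parent folder name contains ".py"
print(importable_local_file.split(".py")[0])  # -> "/home/espoir"

# Stripping only the extension, or using pathlib on the parent folder, avoids the problem
print(os.path.splitext(importable_local_file)[0])            # keeps the full module path
print(Path(importable_local_file).parent / "metadata.json")  # sibling metadata.json
```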
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4540/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4540/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4539 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4539/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4539/comments | https://api.github.com/repos/huggingface/datasets/issues/4539/events | https://github.com/huggingface/datasets/pull/4539 | 1,279,779,829 | PR_kwDODunzps46GfWv | 4,539 | Replace deprecated logging.warn with logging.warning | {
"login": "hugovk",
"id": 1324225,
"node_id": "MDQ6VXNlcjEzMjQyMjU=",
"avatar_url": "https://avatars.githubusercontent.com/u/1324225?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/hugovk",
"html_url": "https://github.com/hugovk",
"followers_url": "https://api.github.com/users/hugovk/followers",
"following_url": "https://api.github.com/users/hugovk/following{/other_user}",
"gists_url": "https://api.github.com/users/hugovk/gists{/gist_id}",
"starred_url": "https://api.github.com/users/hugovk/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/hugovk/subscriptions",
"organizations_url": "https://api.github.com/users/hugovk/orgs",
"repos_url": "https://api.github.com/users/hugovk/repos",
"events_url": "https://api.github.com/users/hugovk/events{/privacy}",
"received_events_url": "https://api.github.com/users/hugovk/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-22T08:32:29" | "2022-06-22T13:43:23" | "2022-06-22T12:51:51" | CONTRIBUTOR | null | Replace `logging.warn` (deprecated in [Python 2.7, 2011](https://github.com/python/cpython/commit/04d5bc00a219860c69ea17eaa633d3ab9917409f)) with `logging.warning` (added in [Python 2.3, 2003](https://github.com/python/cpython/commit/6fa635df7aa88ae9fd8b41ae42743341316c90f7)).
* https://docs.python.org/3/library/logging.html#logging.Logger.warning
* https://github.com/python/cpython/issues/57444
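For illustration only (a sketch, not part of the diff), both calls log the same record, but the old alias also emits a `DeprecationWarning`:
```python
import logging

logging.warning("preferred spelling")  # documented API
logging.warn("deprecated alias")       # same effect, plus a DeprecationWarning
```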
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4539/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4539/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4539",
"html_url": "https://github.com/huggingface/datasets/pull/4539",
"diff_url": "https://github.com/huggingface/datasets/pull/4539.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4539.patch",
"merged_at": "2022-06-22T12:51:51"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4538 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4538/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4538/comments | https://api.github.com/repos/huggingface/datasets/issues/4538/events | https://github.com/huggingface/datasets/issues/4538 | 1,279,409,786 | I_kwDODunzps5MQj56 | 4,538 | Dataset Viewer issue for Pile of Law | {
"login": "Breakend",
"id": 1609857,
"node_id": "MDQ6VXNlcjE2MDk4NTc=",
"avatar_url": "https://avatars.githubusercontent.com/u/1609857?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Breakend",
"html_url": "https://github.com/Breakend",
"followers_url": "https://api.github.com/users/Breakend/followers",
"following_url": "https://api.github.com/users/Breakend/following{/other_user}",
"gists_url": "https://api.github.com/users/Breakend/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Breakend/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Breakend/subscriptions",
"organizations_url": "https://api.github.com/users/Breakend/orgs",
"repos_url": "https://api.github.com/users/Breakend/repos",
"events_url": "https://api.github.com/users/Breakend/events{/privacy}",
"received_events_url": "https://api.github.com/users/Breakend/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 3470211881,
"node_id": "LA_kwDODunzps7O1zsp",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer",
"name": "dataset-viewer",
"color": "E5583E",
"default": false,
"description": "Related to the dataset viewer on huggingface.co"
}
] | closed | false | {
"login": "severo",
"id": 1676121,
"node_id": "MDQ6VXNlcjE2NzYxMjE=",
"avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/severo",
"html_url": "https://github.com/severo",
"followers_url": "https://api.github.com/users/severo/followers",
"following_url": "https://api.github.com/users/severo/following{/other_user}",
"gists_url": "https://api.github.com/users/severo/gists{/gist_id}",
"starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/severo/subscriptions",
"organizations_url": "https://api.github.com/users/severo/orgs",
"repos_url": "https://api.github.com/users/severo/repos",
"events_url": "https://api.github.com/users/severo/events{/privacy}",
"received_events_url": "https://api.github.com/users/severo/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "severo",
"id": 1676121,
"node_id": "MDQ6VXNlcjE2NzYxMjE=",
"avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/severo",
"html_url": "https://github.com/severo",
"followers_url": "https://api.github.com/users/severo/followers",
"following_url": "https://api.github.com/users/severo/following{/other_user}",
"gists_url": "https://api.github.com/users/severo/gists{/gist_id}",
"starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/severo/subscriptions",
"organizations_url": "https://api.github.com/users/severo/orgs",
"repos_url": "https://api.github.com/users/severo/repos",
"events_url": "https://api.github.com/users/severo/events{/privacy}",
"received_events_url": "https://api.github.com/users/severo/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"Hi @Breakend, yes β we'll propose a solution today",
"Thanks so much, I appreciate it!",
"Thanks so much for adding the docs. I was able to successfully hide the viewer using the \r\n```\r\nviewer: false\r\n```\r\nflag in the README.md of the dataset. I'm closing the issue because this is resolved. Thanks again!",
"Awesome! Thanks for confirming. cc @severo ",
"Just for the record:\r\n\r\n- the doc\r\n \r\n<img width=\"1430\" alt=\"Capture dβeΜcran 2022-06-27 aΜ 09 29 27\" src=\"https://user-images.githubusercontent.com/1676121/175884089-bca6c0d5-6387-473e-98ca-86a910ede4bd.png\">\r\n\r\n- the dataset main page\r\n\r\n<img width=\"1134\" alt=\"Capture dβeΜcran 2022-06-27 aΜ 09 29 05\" src=\"https://user-images.githubusercontent.com/1676121/175884152-5f285bf0-3471-45de-927a-e141b00ebb33.png\">\r\n\r\n- the dataset viewer page\r\n\r\n<img width=\"567\" alt=\"Capture dβeΜcran 2022-06-27 aΜ 09 29 16\" src=\"https://user-images.githubusercontent.com/1676121/175884191-ab6a297b-1c11-417e-bbde-0b7623278a79.png\">\r\n"
] | "2022-06-22T02:48:40" | "2022-06-27T07:30:23" | "2022-06-26T22:26:22" | NONE | null | ### Link
https://huggingface.co/datasets/pile-of-law/pile-of-law
### Description
Hi, I would like to turn off the dataset viewer for our dataset without enabling access requests. To comply with upstream dataset creator requests/licenses, we would like to make sure that the data is not indexed by search engines and so would like to turn off dataset previews. But we do not want to collect user emails because it would violate single blind review, allowing us to deduce potential reviewers' identities. Is there a way that we can turn off the dataset viewer without collecting identity information?
Thanks so much!
### Owner
Yes | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4538/reactions",
"total_count": 3,
"+1": 3,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4538/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4537 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4537/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4537/comments | https://api.github.com/repos/huggingface/datasets/issues/4537/events | https://github.com/huggingface/datasets/pull/4537 | 1,279,144,310 | PR_kwDODunzps46ESJn | 4,537 | Fix WMT dataset loading issue and docs update | {
"login": "khushmeeet",
"id": 8711912,
"node_id": "MDQ6VXNlcjg3MTE5MTI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8711912?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/khushmeeet",
"html_url": "https://github.com/khushmeeet",
"followers_url": "https://api.github.com/users/khushmeeet/followers",
"following_url": "https://api.github.com/users/khushmeeet/following{/other_user}",
"gists_url": "https://api.github.com/users/khushmeeet/gists{/gist_id}",
"starred_url": "https://api.github.com/users/khushmeeet/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/khushmeeet/subscriptions",
"organizations_url": "https://api.github.com/users/khushmeeet/orgs",
"repos_url": "https://api.github.com/users/khushmeeet/repos",
"events_url": "https://api.github.com/users/khushmeeet/events{/privacy}",
"received_events_url": "https://api.github.com/users/khushmeeet/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-21T21:48:02" | "2022-06-24T07:05:43" | "2022-06-24T07:05:10" | CONTRIBUTOR | null | This PR is a fix for #4354
Changes are made for `wmt14`, `wmt15`, `wmt16`, `wmt17`, `wmt18`, `wmt19` and `wmt_t2t`, and the READMEs are updated for the corresponding datasets.
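As a quick sanity check once the URLs are fixed (a hedged sketch, not part of this PR; any of the listed configs should behave the same way):
```python
from datasets import load_dataset

# Streaming avoids a full download while still exercising the updated URLs
wmt16 = load_dataset("wmt16", "de-en", split="train", streaming=True)
print(next(iter(wmt16)))
```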
As I am on an M1 Mac, I am not able to create a virtual `dev` environment using `pip install -e ".[dev]"`. The issue is that `tensorflow-text` is not supported on M1s and there is no supporting repo from Apple or Google. So, if I were required to perform local testing, I would not be able to do that.
Let me know if any additional changes are required.
Thanks | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4537/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4537/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4537",
"html_url": "https://github.com/huggingface/datasets/pull/4537",
"diff_url": "https://github.com/huggingface/datasets/pull/4537.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4537.patch",
"merged_at": null
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4536 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4536/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4536/comments | https://api.github.com/repos/huggingface/datasets/issues/4536/events | https://github.com/huggingface/datasets/pull/4536 | 1,278,734,727 | PR_kwDODunzps46C2z6 | 4,536 | Properly raise FileNotFound even if the dataset is private | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-21T17:05:50" | "2022-06-28T10:46:51" | "2022-06-28T10:36:10" | MEMBER | null | `tests/test_load.py::test_load_streaming_private_dataset` was failing because the hub now returns 401 when getting the HfApi.dataset_info of a dataset without authentication. `load_dataset` was raising ConnectionError, while it should be FileNoteFoundError since it first checks for local files before checking the Hub.
Moreover when use_auth_token is not set (default is False), we should not pass `token=None` to HfApi.dataset_info, or it will use the local token by default - instead it should use no token. It's currently not possible to ask for no token to be used, so as a workaround I simply set token="no-token" | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4536/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4536/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4536",
"html_url": "https://github.com/huggingface/datasets/pull/4536",
"diff_url": "https://github.com/huggingface/datasets/pull/4536.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4536.patch",
"merged_at": "2022-06-28T10:36:10"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4535 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4535/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4535/comments | https://api.github.com/repos/huggingface/datasets/issues/4535/events | https://github.com/huggingface/datasets/pull/4535 | 1,278,365,039 | PR_kwDODunzps46BnXq | 4,535 | Add `batch_size` parameter when calling `add_faiss_index` and `add_faiss_index_from_external_arrays` | {
"login": "alvarobartt",
"id": 36760800,
"node_id": "MDQ6VXNlcjM2NzYwODAw",
"avatar_url": "https://avatars.githubusercontent.com/u/36760800?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/alvarobartt",
"html_url": "https://github.com/alvarobartt",
"followers_url": "https://api.github.com/users/alvarobartt/followers",
"following_url": "https://api.github.com/users/alvarobartt/following{/other_user}",
"gists_url": "https://api.github.com/users/alvarobartt/gists{/gist_id}",
"starred_url": "https://api.github.com/users/alvarobartt/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/alvarobartt/subscriptions",
"organizations_url": "https://api.github.com/users/alvarobartt/orgs",
"repos_url": "https://api.github.com/users/alvarobartt/repos",
"events_url": "https://api.github.com/users/alvarobartt/events{/privacy}",
"received_events_url": "https://api.github.com/users/alvarobartt/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-21T12:18:49" | "2022-06-27T16:25:09" | "2022-06-27T16:14:36" | CONTRIBUTOR | null | Currently, even though the `batch_size` when adding vectors to the FAISS index can be tweaked in `FaissIndex.add_vectors()`, the function `ArrowDataset.add_faiss_index` doesn't have either the parameter `batch_size` to be propagated to the nested `FaissIndex.add_vectors` function or `*args, **kwargs`, so on, this PR adds the `batch_size` parameter to both `ArrowDataset.add_faiss_index` and `ArrowDataset.add_faiss_index_from_external_arrays`.
This is useful so as to tweak the `batch_size` according to the VM specifications. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4535/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4535/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4535",
"html_url": "https://github.com/huggingface/datasets/pull/4535",
"diff_url": "https://github.com/huggingface/datasets/pull/4535.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4535.patch",
"merged_at": "2022-06-27T16:14:36"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4534 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4534/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4534/comments | https://api.github.com/repos/huggingface/datasets/issues/4534/events | https://github.com/huggingface/datasets/pull/4534 | 1,277,897,197 | PR_kwDODunzps46AFK_ | 4,534 | Add `tldr_news` dataset | {
"login": "JulesBelveze",
"id": 32683010,
"node_id": "MDQ6VXNlcjMyNjgzMDEw",
"avatar_url": "https://avatars.githubusercontent.com/u/32683010?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/JulesBelveze",
"html_url": "https://github.com/JulesBelveze",
"followers_url": "https://api.github.com/users/JulesBelveze/followers",
"following_url": "https://api.github.com/users/JulesBelveze/following{/other_user}",
"gists_url": "https://api.github.com/users/JulesBelveze/gists{/gist_id}",
"starred_url": "https://api.github.com/users/JulesBelveze/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/JulesBelveze/subscriptions",
"organizations_url": "https://api.github.com/users/JulesBelveze/orgs",
"repos_url": "https://api.github.com/users/JulesBelveze/repos",
"events_url": "https://api.github.com/users/JulesBelveze/events{/privacy}",
"received_events_url": "https://api.github.com/users/JulesBelveze/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-21T05:02:43" | "2022-06-23T14:33:54" | "2022-06-21T14:21:11" | NONE | null | This PR aims at adding support for a news dataset: `tldr news`.
This dataset is based on the daily [tldr tech newsletter](https://tldr.tech/newsletter) and contains a `headline` as well as a `content` for every piece of news contained in a newsletter. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4534/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4534/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4534",
"html_url": "https://github.com/huggingface/datasets/pull/4534",
"diff_url": "https://github.com/huggingface/datasets/pull/4534.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4534.patch",
"merged_at": null
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4533 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4533/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4533/comments | https://api.github.com/repos/huggingface/datasets/issues/4533/events | https://github.com/huggingface/datasets/issues/4533 | 1,277,211,490 | I_kwDODunzps5MILNi | 4,533 | Timestamp not returned as datetime objects in streaming mode | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 3287858981,
"node_id": "MDU6TGFiZWwzMjg3ODU4OTgx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/streaming",
"name": "streaming",
"color": "fef2c0",
"default": false,
"description": ""
}
] | closed | false | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
}
] | null | [] | "2022-06-20T17:28:47" | "2022-06-22T16:29:09" | "2022-06-22T16:29:09" | MEMBER | null | As reported in (internal) https://github.com/huggingface/datasets-server/issues/397
```python
>>> from datasets import load_dataset
>>> dataset = load_dataset("ett", name="h2", split="test", streaming=True)
>>> d = next(iter(dataset))
>>> d['start']
Timestamp('2016-07-01 00:00:00')
```
while loading in non-streaming mode it returns `datetime.datetime(2016, 7, 1, 0, 0)` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4533/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4533/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4532 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4532/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4532/comments | https://api.github.com/repos/huggingface/datasets/issues/4532/events | https://github.com/huggingface/datasets/pull/4532 | 1,277,167,129 | PR_kwDODunzps459kB7 | 4,532 | Add Video feature | {
"login": "nateraw",
"id": 32437151,
"node_id": "MDQ6VXNlcjMyNDM3MTUx",
"avatar_url": "https://avatars.githubusercontent.com/u/32437151?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/nateraw",
"html_url": "https://github.com/nateraw",
"followers_url": "https://api.github.com/users/nateraw/followers",
"following_url": "https://api.github.com/users/nateraw/following{/other_user}",
"gists_url": "https://api.github.com/users/nateraw/gists{/gist_id}",
"starred_url": "https://api.github.com/users/nateraw/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/nateraw/subscriptions",
"organizations_url": "https://api.github.com/users/nateraw/orgs",
"repos_url": "https://api.github.com/users/nateraw/repos",
"events_url": "https://api.github.com/users/nateraw/events{/privacy}",
"received_events_url": "https://api.github.com/users/nateraw/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-20T16:36:41" | "2022-11-10T16:59:51" | "2022-11-10T16:59:51" | CONTRIBUTOR | null | The following adds a `Video` feature for encoding/decoding videos on the fly from in memory bytes. It uses my own `encoded-video` library which is basically `pytorchvideo`'s encoded video but with all the `torch` specific stuff stripped out. Because of that, and because the tool I used under the hood is not very mature, I leave this as a draft idea that we can use to build off of. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4532/reactions",
"total_count": 2,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 2,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4532/timeline | null | null | true | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4532",
"html_url": "https://github.com/huggingface/datasets/pull/4532",
"diff_url": "https://github.com/huggingface/datasets/pull/4532.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4532.patch",
"merged_at": null
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4531 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4531/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4531/comments | https://api.github.com/repos/huggingface/datasets/issues/4531/events | https://github.com/huggingface/datasets/issues/4531 | 1,277,054,172 | I_kwDODunzps5MHkzc | 4,531 | Dataset Viewer issue for CSV datasets | {
"login": "merveenoyan",
"id": 53175384,
"node_id": "MDQ6VXNlcjUzMTc1Mzg0",
"avatar_url": "https://avatars.githubusercontent.com/u/53175384?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/merveenoyan",
"html_url": "https://github.com/merveenoyan",
"followers_url": "https://api.github.com/users/merveenoyan/followers",
"following_url": "https://api.github.com/users/merveenoyan/following{/other_user}",
"gists_url": "https://api.github.com/users/merveenoyan/gists{/gist_id}",
"starred_url": "https://api.github.com/users/merveenoyan/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/merveenoyan/subscriptions",
"organizations_url": "https://api.github.com/users/merveenoyan/orgs",
"repos_url": "https://api.github.com/users/merveenoyan/repos",
"events_url": "https://api.github.com/users/merveenoyan/events{/privacy}",
"received_events_url": "https://api.github.com/users/merveenoyan/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 3470211881,
"node_id": "LA_kwDODunzps7O1zsp",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer",
"name": "dataset-viewer",
"color": "E5583E",
"default": false,
"description": "Related to the dataset viewer on huggingface.co"
}
] | closed | false | {
"login": "severo",
"id": 1676121,
"node_id": "MDQ6VXNlcjE2NzYxMjE=",
"avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/severo",
"html_url": "https://github.com/severo",
"followers_url": "https://api.github.com/users/severo/followers",
"following_url": "https://api.github.com/users/severo/following{/other_user}",
"gists_url": "https://api.github.com/users/severo/gists{/gist_id}",
"starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/severo/subscriptions",
"organizations_url": "https://api.github.com/users/severo/orgs",
"repos_url": "https://api.github.com/users/severo/repos",
"events_url": "https://api.github.com/users/severo/events{/privacy}",
"received_events_url": "https://api.github.com/users/severo/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "severo",
"id": 1676121,
"node_id": "MDQ6VXNlcjE2NzYxMjE=",
"avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/severo",
"html_url": "https://github.com/severo",
"followers_url": "https://api.github.com/users/severo/followers",
"following_url": "https://api.github.com/users/severo/following{/other_user}",
"gists_url": "https://api.github.com/users/severo/gists{/gist_id}",
"starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/severo/subscriptions",
"organizations_url": "https://api.github.com/users/severo/orgs",
"repos_url": "https://api.github.com/users/severo/repos",
"events_url": "https://api.github.com/users/severo/events{/privacy}",
"received_events_url": "https://api.github.com/users/severo/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"this should now be fixed",
"Confirmed, it's fixed now. Thanks for reporting, and thanks @coyotte508 for fixing it\r\n\r\n<img width=\"1123\" alt=\"Capture dβeΜcran 2022-06-21 aΜ 10 28 05\" src=\"https://user-images.githubusercontent.com/1676121/174753833-1b453a5a-6a90-4717-bca1-1b5fc6b75e4a.png\">\r\n"
] | "2022-06-20T14:56:24" | "2022-06-21T08:28:46" | "2022-06-21T08:28:27" | CONTRIBUTOR | null | ### Link
https://huggingface.co/datasets/scikit-learn/breast-cancer-wisconsin
### Description
I'm populating CSV datasets [here](https://huggingface.co/scikit-learn) but the viewer is not enabled: it looks for a dataset loading script, and the datasets aren't in the processing queue either.
You can replicate the problem by simply uploading any CSV dataset.
### Owner
Yes | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4531/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4531/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4530 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4530/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4530/comments | https://api.github.com/repos/huggingface/datasets/issues/4530/events | https://github.com/huggingface/datasets/pull/4530 | 1,276,884,962 | PR_kwDODunzps458n_S | 4,530 | Add AudioFolder packaged loader | {
"login": "polinaeterna",
"id": 16348744,
"node_id": "MDQ6VXNlcjE2MzQ4NzQ0",
"avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/polinaeterna",
"html_url": "https://github.com/polinaeterna",
"followers_url": "https://api.github.com/users/polinaeterna/followers",
"following_url": "https://api.github.com/users/polinaeterna/following{/other_user}",
"gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}",
"starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions",
"organizations_url": "https://api.github.com/users/polinaeterna/orgs",
"repos_url": "https://api.github.com/users/polinaeterna/repos",
"events_url": "https://api.github.com/users/polinaeterna/events{/privacy}",
"received_events_url": "https://api.github.com/users/polinaeterna/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892871,
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement",
"name": "enhancement",
"color": "a2eeef",
"default": true,
"description": "New feature or request"
}
] | closed | false | {
"login": "polinaeterna",
"id": 16348744,
"node_id": "MDQ6VXNlcjE2MzQ4NzQ0",
"avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/polinaeterna",
"html_url": "https://github.com/polinaeterna",
"followers_url": "https://api.github.com/users/polinaeterna/followers",
"following_url": "https://api.github.com/users/polinaeterna/following{/other_user}",
"gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}",
"starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions",
"organizations_url": "https://api.github.com/users/polinaeterna/orgs",
"repos_url": "https://api.github.com/users/polinaeterna/repos",
"events_url": "https://api.github.com/users/polinaeterna/events{/privacy}",
"received_events_url": "https://api.github.com/users/polinaeterna/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "polinaeterna",
"id": 16348744,
"node_id": "MDQ6VXNlcjE2MzQ4NzQ0",
"avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/polinaeterna",
"html_url": "https://github.com/polinaeterna",
"followers_url": "https://api.github.com/users/polinaeterna/followers",
"following_url": "https://api.github.com/users/polinaeterna/following{/other_user}",
"gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}",
"starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions",
"organizations_url": "https://api.github.com/users/polinaeterna/orgs",
"repos_url": "https://api.github.com/users/polinaeterna/repos",
"events_url": "https://api.github.com/users/polinaeterna/events{/privacy}",
"received_events_url": "https://api.github.com/users/polinaeterna/received_events",
"type": "User",
"site_admin": false
}
] | null | [] | "2022-06-20T12:54:02" | "2022-08-22T14:36:49" | "2022-08-22T14:20:40" | CONTRIBUTOR | null | will close #3964
AudioFolder is almost identical to ImageFolder, except that inferring labels is not the default behavior (`drop_labels` is set to `True` in the config); the option of inferring them is preserved though.
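A minimal usage sketch of the new loader (the folder layout is hypothetical):
```python
from datasets import load_dataset

# Loads all audio files (and an optional metadata file) found under data_dir
ds = load_dataset("audiofolder", data_dir="path/to/folder")

# Unlike ImageFolder, labels are not inferred from sub-folder names by default; opt in explicitly
ds_labeled = load_dataset("audiofolder", data_dir="path/to/folder", drop_labels=False)
```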
The weird thing happens with `test_data_files_with_metadata_and_archives` when `streaming` is `True`. Here is the log from the CI:
```
../.pyenv/versions/3.6.15/lib/python3.6/site-packages/datasets/features/audio.py:237: in _decode_non_mp3_path_like
array, sampling_rate = librosa.load(f, sr=self.sampling_rate, mono=self.mono)
../.pyenv/versions/3.6.15/lib/python3.6/site-packages/librosa/util/decorators.py:88: in inner_f
return f(*args, **kwargs)
../.pyenv/versions/3.6.15/lib/python3.6/site-packages/librosa/core/audio.py:176: in load
raise (exc)
../.pyenv/versions/3.6.15/lib/python3.6/site-packages/librosa/core/audio.py:155: in load
context = sf.SoundFile(path)
../.pyenv/versions/3.6.15/lib/python3.6/site-packages/soundfile.py:629: in __init__
self._file = self._open(file, mode_int, closefd)
../.pyenv/versions/3.6.15/lib/python3.6/site-packages/soundfile.py:1184: in _open
"Error opening {0!r}: ".format(self.name))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
err = 72
prefix = "Error opening <zipfile.ZipExtFile name='audio_file.wav' mode='r' compress_type=deflate>: "
def _error_check(err, prefix=""):
"""Pretty-print a numerical error code if there is an error."""
if err != 0:
err_str = _snd.sf_error_number(err)
> raise RuntimeError(prefix + _ffi.string(err_str).decode('utf-8', 'replace'))
E RuntimeError: Error opening <zipfile.ZipExtFile name='audio_file.wav' mode='r' compress_type=deflate>: Error in WAV file. No 'data' chunk marker.
```
I wasn't able to reproduce this locally until I created the same test environment (i.e. with `pip install .[tests]`) with Python 3.6. The same env with Python 3.8 passes the test! I didn't manage to figure out what's wrong; I also tried simply replacing the test wav file and still got the same error. Versions of `soundfile`, `librosa` and `libsndfile` are identical. Might it be something with zip compression? Sounds weird but I don't have any other ideas...
TODO:
- [x] align with #4622
- [x] documentation
- [x] tests for AutoFolder? | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4530/reactions",
"total_count": 2,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 2,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4530/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4530",
"html_url": "https://github.com/huggingface/datasets/pull/4530",
"diff_url": "https://github.com/huggingface/datasets/pull/4530.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4530.patch",
"merged_at": "2022-08-22T14:20:40"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4528 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4528/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4528/comments | https://api.github.com/repos/huggingface/datasets/issues/4528/events | https://github.com/huggingface/datasets/issues/4528 | 1,276,679,155 | I_kwDODunzps5MGJPz | 4,528 | Memory leak when iterating a Dataset | {
"login": "NouamaneTazi",
"id": 29777165,
"node_id": "MDQ6VXNlcjI5Nzc3MTY1",
"avatar_url": "https://avatars.githubusercontent.com/u/29777165?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/NouamaneTazi",
"html_url": "https://github.com/NouamaneTazi",
"followers_url": "https://api.github.com/users/NouamaneTazi/followers",
"following_url": "https://api.github.com/users/NouamaneTazi/following{/other_user}",
"gists_url": "https://api.github.com/users/NouamaneTazi/gists{/gist_id}",
"starred_url": "https://api.github.com/users/NouamaneTazi/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/NouamaneTazi/subscriptions",
"organizations_url": "https://api.github.com/users/NouamaneTazi/orgs",
"repos_url": "https://api.github.com/users/NouamaneTazi/repos",
"events_url": "https://api.github.com/users/NouamaneTazi/events{/privacy}",
"received_events_url": "https://api.github.com/users/NouamaneTazi/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | [
"Is someone assigned to this issue?",
"The same issue is being debugged here: https://github.com/huggingface/datasets/issues/4883\r\n",
"Here is a modified repro example that makes it easier to see the leak:\r\n\r\n```\r\n$ cat ds2.py\r\nimport gc, sys\r\nimport time\r\nfrom datasets import load_dataset\r\nimport os, psutil\r\n\r\nprocess = psutil.Process(os.getpid())\r\n\r\nprint(process.memory_info().rss/2**20)\r\n\r\ncorpus = load_dataset(\"BeIR/msmarco\", 'corpus', keep_in_memory=False, streaming=False)['corpus']\r\ncorpus = corpus.select(range(200000))\r\n\r\nprint(process.memory_info().rss/2**20)\r\n\r\nbatch = None\r\n\r\nmem_before_start = psutil.Process(os.getpid()).memory_info().rss / 2**20\r\n\r\nstep = 20000\r\nfor i in range(0, 10*step, step):\r\n mem_before = psutil.Process(os.getpid()).memory_info().rss / 2**20\r\n batch = corpus[i:i+step]\r\n import objgraph\r\n #objgraph.show_refs([batch])\r\n #objgraph.show_refs([corpus])\r\n #sys.exit()\r\n gc.collect()\r\n\r\n mem_after = psutil.Process(os.getpid()).memory_info().rss / 2**20\r\n print(f\"{i:6d} {mem_after - mem_before:12.4f} {mem_after - mem_before_start:12.4f}\")\r\n\r\n```\r\n\r\nLet's run:\r\n\r\n```\r\n$ python ds2.py\r\n 0 36.5391 36.5391\r\n 20000 10.4609 47.0000\r\n 40000 5.9766 52.9766\r\n 60000 7.8906 60.8672\r\n 80000 6.0586 66.9258\r\n100000 8.4453 75.3711\r\n120000 6.7422 82.1133\r\n140000 8.5664 90.6797\r\n160000 5.7344 96.4141\r\n180000 8.3398 104.7539\r\n```\r\n\r\nYou can see the last column of total RSS memory keeps on growing in MBs. The mid column is by how much it was grown during a single iteration of the repro script (20000 items)",
"@NouamaneTazi, please check my analysis here https://github.com/huggingface/datasets/issues/4883#issuecomment-1242599722 so if you agree with my research this Issue can be closed as well.\r\n\r\nI also made a suggestion at how to proceed to hunt for a real leak here https://github.com/huggingface/datasets/issues/4883#issuecomment-1242600626\r\n\r\nyou may find this one to be useful as well https://github.com/huggingface/datasets/issues/4883#issuecomment-1242597966",
"Amazing job! Thanks for taking time to debug this π€\r\n\r\nFor my side, I tried to do some more research as well, but to no avail. https://github.com/huggingface/datasets/issues/4883#issuecomment-1243415957"
] | "2022-06-20T10:03:14" | "2022-09-12T08:51:39" | "2022-09-12T08:51:39" | MEMBER | null | e## Describe the bug
It seems that memory never gets freed after iterating a `Dataset` (using `.map()` or a simple `for` loop)
## Steps to reproduce the bug
```python
import gc
import logging
import time
import pyarrow
from datasets import load_dataset
from tqdm import trange
import os, psutil
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
process = psutil.Process(os.getpid())
print(process.memory_info().rss) # output: 633507840 bytes
corpus = load_dataset("BeIR/msmarco", 'corpus', keep_in_memory=False, streaming=False)['corpus'] # or "BeIR/trec-covid" for a smaller dataset
print(process.memory_info().rss) # output: 698601472 bytes
logger.info("Applying method to all examples in all splits")
for i in trange(0, len(corpus), 1000):
    batch = corpus[i:i+1000]
    data = pyarrow.total_allocated_bytes()
    if data > 0:
        logger.info(f"{i}/{len(corpus)}: {data}")
print(process.memory_info().rss) # output: 3788247040 bytes
del batch
gc.collect()
print(process.memory_info().rss) # output: 3788247040 bytes
logger.info("Done...")
time.sleep(100)
```
## Expected results
Limited memory usage, and memory to be freed after processing
## Actual results
Memory leak
![test](https://user-images.githubusercontent.com/29777165/174578276-f2c37e6c-b5d8-4985-b4d8-8413eb2b3241.png)
You can see how the memory allocation keeps increasing until it reaches a steady state when we hit the `time.sleep(100)`, which showcases that even the garbage collector couldn't free the allocated memory
## Environment info
<!-- You can run the command `datasets-cli env` and copy-and-paste its output below. -->
- `datasets` version: 2.3.2
- Platform: Linux-5.4.0-90-generic-x86_64-with-glibc2.31
- Python version: 3.9.7
- PyArrow version: 8.0.0
- Pandas version: 1.4.2
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4528/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4528/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4527 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4527/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4527/comments | https://api.github.com/repos/huggingface/datasets/issues/4527/events | https://github.com/huggingface/datasets/issues/4527 | 1,276,583,536 | I_kwDODunzps5MFx5w | 4,527 | Dataset Viewer issue for vadis/sv-ident | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 3470211881,
"node_id": "LA_kwDODunzps7O1zsp",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer",
"name": "dataset-viewer",
"color": "E5583E",
"default": false,
"description": "Related to the dataset viewer on huggingface.co"
}
] | closed | false | {
"login": "severo",
"id": 1676121,
"node_id": "MDQ6VXNlcjE2NzYxMjE=",
"avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/severo",
"html_url": "https://github.com/severo",
"followers_url": "https://api.github.com/users/severo/followers",
"following_url": "https://api.github.com/users/severo/following{/other_user}",
"gists_url": "https://api.github.com/users/severo/gists{/gist_id}",
"starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/severo/subscriptions",
"organizations_url": "https://api.github.com/users/severo/orgs",
"repos_url": "https://api.github.com/users/severo/repos",
"events_url": "https://api.github.com/users/severo/events{/privacy}",
"received_events_url": "https://api.github.com/users/severo/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "severo",
"id": 1676121,
"node_id": "MDQ6VXNlcjE2NzYxMjE=",
"avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/severo",
"html_url": "https://github.com/severo",
"followers_url": "https://api.github.com/users/severo/followers",
"following_url": "https://api.github.com/users/severo/following{/other_user}",
"gists_url": "https://api.github.com/users/severo/gists{/gist_id}",
"starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/severo/subscriptions",
"organizations_url": "https://api.github.com/users/severo/orgs",
"repos_url": "https://api.github.com/users/severo/repos",
"events_url": "https://api.github.com/users/severo/events{/privacy}",
"received_events_url": "https://api.github.com/users/severo/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"Fixed, thanks!\r\n![Uploading Capture dβeΜcran 2022-06-21 aΜ 18.42.40.pngβ¦]()\r\n\r\n"
] | "2022-06-20T08:47:42" | "2022-06-21T16:42:46" | "2022-06-21T16:42:45" | MEMBER | null | ### Link
https://huggingface.co/datasets/vadis/sv-ident
### Description
The dataset preview does not work:
```
Server Error
Status code: 400
Exception: Status400Error
Message: The dataset does not exist.
```
However, the dataset is streamable and works locally:
```python
In [1]: from datasets import load_dataset; ds = load_dataset("sv-ident.py", split="train", streaming=True); item = next(iter(ds)); item
Using custom data configuration default
Out[1]:
{'sentence': 'Our point, however, is that so long as downward (favorable) comparisons overwhelm the potential for unfavorable comparisons, system justification should be a likely outcome amongst the disadvantaged.',
'is_variable': 1,
'variable': ['exploredata-ZA5400_VarV66', 'exploredata-ZA5400_VarV53'],
'research_data': ['ZA5400'],
'doc_id': '73106',
'uuid': 'b9fbb80f-3492-4b42-b9d5-0254cc33ac10',
'lang': 'en'}
```
CC: @e-tornike
### Owner
No | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4527/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4527/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4523 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4523/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4523/comments | https://api.github.com/repos/huggingface/datasets/issues/4523/events | https://github.com/huggingface/datasets/pull/4523 | 1,275,002,639 | PR_kwDODunzps452hgh | 4,523 | Update download url and improve card of `cats_vs_dogs` dataset | {
"login": "mariosasko",
"id": 47462742,
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariosasko",
"html_url": "https://github.com/mariosasko",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-17T12:59:44" | "2022-06-21T14:23:26" | "2022-06-21T14:13:08" | CONTRIBUTOR | null | Improve the download URL (reported here: https://huggingface.co/datasets/cats_vs_dogs/discussions/1), remove the `image_file_path` column (not used in Transformers, so it should be safe) and add more info to the card. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4523/reactions",
"total_count": 1,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 1,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4523/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4523",
"html_url": "https://github.com/huggingface/datasets/pull/4523",
"diff_url": "https://github.com/huggingface/datasets/pull/4523.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4523.patch",
"merged_at": "2022-06-21T14:13:08"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4521 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4521/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4521/comments | https://api.github.com/repos/huggingface/datasets/issues/4521/events | https://github.com/huggingface/datasets/issues/4521 | 1,274,919,437 | I_kwDODunzps5L_boN | 4,521 | Datasets method `.map` not hashing | {
"login": "sanchit-gandhi",
"id": 93869735,
"node_id": "U_kgDOBZhWpw",
"avatar_url": "https://avatars.githubusercontent.com/u/93869735?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/sanchit-gandhi",
"html_url": "https://github.com/sanchit-gandhi",
"followers_url": "https://api.github.com/users/sanchit-gandhi/followers",
"following_url": "https://api.github.com/users/sanchit-gandhi/following{/other_user}",
"gists_url": "https://api.github.com/users/sanchit-gandhi/gists{/gist_id}",
"starred_url": "https://api.github.com/users/sanchit-gandhi/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/sanchit-gandhi/subscriptions",
"organizations_url": "https://api.github.com/users/sanchit-gandhi/orgs",
"repos_url": "https://api.github.com/users/sanchit-gandhi/repos",
"events_url": "https://api.github.com/users/sanchit-gandhi/events{/privacy}",
"received_events_url": "https://api.github.com/users/sanchit-gandhi/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | [
"Fix posted: https://github.com/huggingface/datasets/issues/4506#issuecomment-1157417219",
"Didn't realize it's a bug when I asked the question yesterday! Free free to post an answer if you are sure the cause has been addressed.\r\n\r\nhttps://stackoverflow.com/questions/72664827/can-pickle-dill-foo-but-not-lambda-x-foox",
"Thank @nalzok . That works for me:\r\n\r\n`pip install \"dill<0.3.5\"`"
] | "2022-06-17T11:31:10" | "2022-08-04T12:08:16" | "2022-06-28T13:23:05" | CONTRIBUTOR | null | ## Describe the bug
Datasets method `.map` not hashing, even with an empty no-op function
## Steps to reproduce the bug
```python
from datasets import load_dataset
# download 9MB dummy dataset
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean")
def prepare_dataset(batch):
    return batch

ds = ds.map(
    prepare_dataset,
    num_proc=1,
    desc="preprocess train dataset",
)
```
## Expected results
Hashed and cached dataset preprocessing
## Actual results
Does not hash properly:
```
Parameter 'function'=<function prepare_dataset at 0x7fccb68e9280> of the transform datasets.arrow_dataset.Dataset._map_single couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only showed once. Subsequent hashing failures won't be showed.
```
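A quick way to check whether the function itself can be hashed (a hedged sketch; if this fails, `.map` falls back to a random hash and caching cannot work):
```python
from datasets.fingerprint import Hasher

def prepare_dataset(batch):
    return batch

# With a working dill/pickle setup this prints a deterministic hex digest
print(Hasher.hash(prepare_dataset))
```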
## Environment info
<!-- You can run the command `datasets-cli env` and copy-and-paste its output below. -->
- `datasets` version: 2.3.3.dev0
- Platform: Linux-5.11.0-1028-gcp-x86_64-with-glibc2.31
- Python version: 3.9.12
- PyArrow version: 8.0.0
- Pandas version: 1.4.2
cc @lhoestq
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4521/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4521/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4520 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4520/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4520/comments | https://api.github.com/repos/huggingface/datasets/issues/4520/events | https://github.com/huggingface/datasets/issues/4520 | 1,274,879,180 | I_kwDODunzps5L_RzM | 4,520 | Failure to hash `dataclasses` - results in functions that cannot be hashed or cached in `.map` | {
"login": "sanchit-gandhi",
"id": 93869735,
"node_id": "U_kgDOBZhWpw",
"avatar_url": "https://avatars.githubusercontent.com/u/93869735?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/sanchit-gandhi",
"html_url": "https://github.com/sanchit-gandhi",
"followers_url": "https://api.github.com/users/sanchit-gandhi/followers",
"following_url": "https://api.github.com/users/sanchit-gandhi/following{/other_user}",
"gists_url": "https://api.github.com/users/sanchit-gandhi/gists{/gist_id}",
"starred_url": "https://api.github.com/users/sanchit-gandhi/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/sanchit-gandhi/subscriptions",
"organizations_url": "https://api.github.com/users/sanchit-gandhi/orgs",
"repos_url": "https://api.github.com/users/sanchit-gandhi/repos",
"events_url": "https://api.github.com/users/sanchit-gandhi/events{/privacy}",
"received_events_url": "https://api.github.com/users/sanchit-gandhi/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | [
"I think this has been fixed by #4516, let me know if you encounter this again :)\r\n\r\nI re-ran your code in 3.7 and 3.9 and it works fine",
"Thank you!"
] | "2022-06-17T10:47:17" | "2022-06-28T14:47:17" | "2022-06-28T14:04:29" | CONTRIBUTOR | null | Dataclasses cannot be hashed. As a result, they cannot be hashed or cached if used in the `.map` method. Dataclasses are used extensively in Transformers examples scripts: (c.f. [CTC example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py)). Since dataclasses cannot be hashed, one has to define separate variables prior to passing dataclass attributes to the `.map` method:
```python
phoneme_language = data_args.phoneme_language
```
in the example https://github.com/huggingface/transformers/blob/3c7e56fbb11f401de2528c1dcf0e282febc031cd/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py#L603-L630
## Steps to reproduce the bug
```python
from dataclasses import dataclass, field
from datasets.fingerprint import Hasher
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    phoneme_language: str = field(
        default=None, metadata={"help": "The name of the phoneme language to use."}
    )


data_args = DataTrainingArguments(phoneme_language="foo")
Hasher.hash(data_args)

phoneme_language = data_args.phoneme_language
Hasher.hash(phoneme_language)
```
## Expected results
A hash.
## Actual results
<details>
<summary> Traceback </summary>
```
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
Input In [1], in <cell line: 16>()
10 phoneme_language: str = field(
11 default=None, metadata={"help": "The name of the phoneme language to use."}
12 )
14 data_args = DataTrainingArguments(phoneme_language ="foo")
---> 16 Hasher.hash(data_args)
18 phoneme_language = data_args. phoneme_language
20 Hasher.hash(phoneme_language)
File ~/datasets/src/datasets/fingerprint.py:237, in Hasher.hash(cls, value)
235 return cls.dispatch[type(value)](cls, value)
236 else:
--> 237 return cls.hash_default(value)
File ~/datasets/src/datasets/fingerprint.py:230, in Hasher.hash_default(cls, value)
228 @classmethod
229 def hash_default(cls, value: Any) -> str:
--> 230 return cls.hash_bytes(dumps(value))
File ~/datasets/src/datasets/utils/py_utils.py:564, in dumps(obj)
562 file = StringIO()
563 with _no_cache_fields(obj):
--> 564 dump(obj, file)
565 return file.getvalue()
File ~/datasets/src/datasets/utils/py_utils.py:539, in dump(obj, file)
537 def dump(obj, file):
538 """pickle an object to a file"""
--> 539 Pickler(file, recurse=True).dump(obj)
540 return
File ~/hf/lib/python3.8/site-packages/dill/_dill.py:620, in Pickler.dump(self, obj)
618 raise PicklingError(msg)
619 else:
--> 620 StockPickler.dump(self, obj)
621 return
File /usr/lib/python3.8/pickle.py:487, in _Pickler.dump(self, obj)
485 if self.proto >= 4:
486 self.framer.start_framing()
--> 487 self.save(obj)
488 self.write(STOP)
489 self.framer.end_framing()
File /usr/lib/python3.8/pickle.py:603, in _Pickler.save(self, obj, save_persistent_id)
599 raise PicklingError("Tuple returned by %s must have "
600 "two to six elements" % reduce)
602 # Save the reduce() output and finally memoize the object
--> 603 self.save_reduce(obj=obj, *rv)
File /usr/lib/python3.8/pickle.py:687, in _Pickler.save_reduce(self, func, args, state, listitems, dictitems, state_setter, obj)
684 raise PicklingError(
685 "args[0] from __newobj__ args has the wrong class")
686 args = args[1:]
--> 687 save(cls)
688 save(args)
689 write(NEWOBJ)
File /usr/lib/python3.8/pickle.py:560, in _Pickler.save(self, obj, save_persistent_id)
558 f = self.dispatch.get(t)
559 if f is not None:
--> 560 f(self, obj) # Call unbound method with explicit self
561 return
563 # Check private dispatch table if any, or else
564 # copyreg.dispatch_table
File ~/hf/lib/python3.8/site-packages/dill/_dill.py:1838, in save_type(pickler, obj, postproc_list)
1836 postproc_list = []
1837 postproc_list.append((setattr, (obj, '__qualname__', obj_name)))
-> 1838 _save_with_postproc(pickler, (_create_type, (
1839 type(obj), obj.__name__, obj.__bases__, _dict
1840 )), obj=obj, postproc_list=postproc_list)
1841 log.info("# %s" % _t)
1842 else:
File ~/hf/lib/python3.8/site-packages/dill/_dill.py:1140, in _save_with_postproc(pickler, reduction, is_pickler_dill, obj, postproc_list)
1137 pickler._postproc[id(obj)] = postproc_list
1139 # TODO: Use state_setter in Python 3.8 to allow for faster cPickle implementations
-> 1140 pickler.save_reduce(*reduction, obj=obj)
1142 if is_pickler_dill:
1143 # pickler.x -= 1
1144 # print(pickler.x*' ', 'pop', obj, id(obj))
1145 postproc = pickler._postproc.pop(id(obj))
File /usr/lib/python3.8/pickle.py:692, in _Pickler.save_reduce(self, func, args, state, listitems, dictitems, state_setter, obj)
690 else:
691 save(func)
--> 692 save(args)
693 write(REDUCE)
695 if obj is not None:
696 # If the object is already in the memo, this means it is
697 # recursive. In this case, throw away everything we put on the
698 # stack, and fetch the object back from the memo.
File /usr/lib/python3.8/pickle.py:560, in _Pickler.save(self, obj, save_persistent_id)
558 f = self.dispatch.get(t)
559 if f is not None:
--> 560 f(self, obj) # Call unbound method with explicit self
561 return
563 # Check private dispatch table if any, or else
564 # copyreg.dispatch_table
File /usr/lib/python3.8/pickle.py:901, in _Pickler.save_tuple(self, obj)
899 write(MARK)
900 for element in obj:
--> 901 save(element)
903 if id(obj) in memo:
904 # Subtle. d was not in memo when we entered save_tuple(), so
905 # the process of saving the tuple's elements must have saved
(...)
909 # could have been done in the "for element" loop instead, but
910 # recursive tuples are a rare thing.
911 get = self.get(memo[id(obj)][0])
File /usr/lib/python3.8/pickle.py:560, in _Pickler.save(self, obj, save_persistent_id)
558 f = self.dispatch.get(t)
559 if f is not None:
--> 560 f(self, obj) # Call unbound method with explicit self
561 return
563 # Check private dispatch table if any, or else
564 # copyreg.dispatch_table
File ~/hf/lib/python3.8/site-packages/dill/_dill.py:1251, in save_module_dict(pickler, obj)
1248 if is_dill(pickler, child=False) and pickler._session:
1249 # we only care about session the first pass thru
1250 pickler._first_pass = False
-> 1251 StockPickler.save_dict(pickler, obj)
1252 log.info("# D2")
1253 return
File /usr/lib/python3.8/pickle.py:971, in _Pickler.save_dict(self, obj)
968 self.write(MARK + DICT)
970 self.memoize(obj)
--> 971 self._batch_setitems(obj.items())
File /usr/lib/python3.8/pickle.py:997, in _Pickler._batch_setitems(self, items)
995 for k, v in tmp:
996 save(k)
--> 997 save(v)
998 write(SETITEMS)
999 elif n:
File /usr/lib/python3.8/pickle.py:560, in _Pickler.save(self, obj, save_persistent_id)
558 f = self.dispatch.get(t)
559 if f is not None:
--> 560 f(self, obj) # Call unbound method with explicit self
561 return
563 # Check private dispatch table if any, or else
564 # copyreg.dispatch_table
File ~/datasets/src/datasets/utils/py_utils.py:862, in save_function(pickler, obj)
859 if state_dict:
860 state = state, state_dict
--> 862 dill._dill._save_with_postproc(
863 pickler,
864 (
865 dill._dill._create_function,
866 (obj.__code__, globs, obj.__name__, obj.__defaults__, closure),
867 state,
868 ),
869 obj=obj,
870 postproc_list=postproc_list,
871 )
872 else:
873 closure = obj.func_closure
File ~/hf/lib/python3.8/site-packages/dill/_dill.py:1153, in _save_with_postproc(pickler, reduction, is_pickler_dill, obj, postproc_list)
1151 dest, source = reduction[1]
1152 if source:
-> 1153 pickler.write(pickler.get(pickler.memo[id(dest)][0]))
1154 pickler._batch_setitems(iter(source.items()))
1155 else:
1156 # Updating with an empty dictionary. Same as doing nothing.
KeyError: 140434581781568
```
</details>
## Environment info
<!-- You can run the command `datasets-cli env` and copy-and-paste its output below. -->
- `datasets` version: 2.3.3.dev0
- Platform: Linux-5.11.0-1028-gcp-x86_64-with-glibc2.29
- Python version: 3.8.10
- PyArrow version: 8.0.0
- Pandas version: 1.4.2
cc @lhoestq | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4520/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4520/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4519 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4519/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4519/comments | https://api.github.com/repos/huggingface/datasets/issues/4519/events | https://github.com/huggingface/datasets/pull/4519 | 1,274,110,623 | PR_kwDODunzps45zhqa | 4,519 | Create new sections for audio and vision in guides | {
"login": "stevhliu",
"id": 59462357,
"node_id": "MDQ6VXNlcjU5NDYyMzU3",
"avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/stevhliu",
"html_url": "https://github.com/stevhliu",
"followers_url": "https://api.github.com/users/stevhliu/followers",
"following_url": "https://api.github.com/users/stevhliu/following{/other_user}",
"gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}",
"starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions",
"organizations_url": "https://api.github.com/users/stevhliu/orgs",
"repos_url": "https://api.github.com/users/stevhliu/repos",
"events_url": "https://api.github.com/users/stevhliu/events{/privacy}",
"received_events_url": "https://api.github.com/users/stevhliu/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892861,
"node_id": "MDU6TGFiZWwxOTM1ODkyODYx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/documentation",
"name": "documentation",
"color": "0075ca",
"default": true,
"description": "Improvements or additions to documentation"
}
] | closed | false | null | [] | null | [] | "2022-06-16T21:38:24" | "2022-07-07T15:36:37" | "2022-07-07T15:24:58" | MEMBER | null | This PR creates separate sections in the guides for audio, vision, text, and general usage so it is easier for users to find loading, processing, or sharing guides specific to the dataset type they're working with. It'll also allow us to scale the docs to additional dataset types - like time series, tabular, etc. - while keeping our docs information architecture.
Some other changes include:
- ~Experimented with decorating text with some CSS to highlight guides specific to each modality. Hopefully, it'll be easier for users to find and realize that these different docs exist!~ Will experiment with this in a different PR.
- Added deprecation warning for Metrics and redirect to Evaluate.
- Updated `set_format` section to recommend using the new `to_tf_dataset` function if you need to convert to a TensorFlow dataset.
- Reorganized `toctree` to nest general usage, audio, vision, and text sections under the how-to guides.
- A quick review and edit to the Load and Process docs for clarity. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4519/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4519/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4519",
"html_url": "https://github.com/huggingface/datasets/pull/4519",
"diff_url": "https://github.com/huggingface/datasets/pull/4519.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4519.patch",
"merged_at": "2022-07-07T15:24:58"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4518 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4518/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4518/comments | https://api.github.com/repos/huggingface/datasets/issues/4518/events | https://github.com/huggingface/datasets/pull/4518 | 1,274,010,628 | PR_kwDODunzps45zMnB | 4,518 | Patch tests for hfh v0.8.0 | {
"login": "LysandreJik",
"id": 30755778,
"node_id": "MDQ6VXNlcjMwNzU1Nzc4",
"avatar_url": "https://avatars.githubusercontent.com/u/30755778?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/LysandreJik",
"html_url": "https://github.com/LysandreJik",
"followers_url": "https://api.github.com/users/LysandreJik/followers",
"following_url": "https://api.github.com/users/LysandreJik/following{/other_user}",
"gists_url": "https://api.github.com/users/LysandreJik/gists{/gist_id}",
"starred_url": "https://api.github.com/users/LysandreJik/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/LysandreJik/subscriptions",
"organizations_url": "https://api.github.com/users/LysandreJik/orgs",
"repos_url": "https://api.github.com/users/LysandreJik/repos",
"events_url": "https://api.github.com/users/LysandreJik/events{/privacy}",
"received_events_url": "https://api.github.com/users/LysandreJik/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-16T19:45:32" | "2022-06-17T16:15:57" | "2022-06-17T16:06:07" | MEMBER | null | This PR patches testing utilities that would otherwise fail with hfh v0.8.0. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4518/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4518/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4518",
"html_url": "https://github.com/huggingface/datasets/pull/4518",
"diff_url": "https://github.com/huggingface/datasets/pull/4518.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4518.patch",
"merged_at": "2022-06-17T16:06:07"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4517 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4517/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4517/comments | https://api.github.com/repos/huggingface/datasets/issues/4517/events | https://github.com/huggingface/datasets/pull/4517 | 1,273,960,476 | PR_kwDODunzps45zBl0 | 4,517 | Add tags for task_ids:summarization-* and task_categories:summarization* | {
"login": "hobson",
"id": 292855,
"node_id": "MDQ6VXNlcjI5Mjg1NQ==",
"avatar_url": "https://avatars.githubusercontent.com/u/292855?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/hobson",
"html_url": "https://github.com/hobson",
"followers_url": "https://api.github.com/users/hobson/followers",
"following_url": "https://api.github.com/users/hobson/following{/other_user}",
"gists_url": "https://api.github.com/users/hobson/gists{/gist_id}",
"starred_url": "https://api.github.com/users/hobson/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/hobson/subscriptions",
"organizations_url": "https://api.github.com/users/hobson/orgs",
"repos_url": "https://api.github.com/users/hobson/repos",
"events_url": "https://api.github.com/users/hobson/events{/privacy}",
"received_events_url": "https://api.github.com/users/hobson/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-16T18:52:25" | "2022-07-08T15:14:23" | "2022-07-08T15:02:31" | CONTRIBUTOR | null | The YAML header at the top of the README.md file was edited to add task tags because I couldn't find the existing tags in the JSON.
A separate pull request will modify dataset_infos.json to add these tags.
The Enron dataset (dataset id aeslc) is only tagged with:
arxiv:1906.03497
languages:en
pretty_name:AESLC
Using the email subject_line field as a label or target variable, it is possible to create models for the following task_ids (in order of relevance):
'task_ids:summarization'
'task_ids:summarization-other-conversations-summarization'
"task_ids:other-other-query-based-multi-document-summarization"
'task_ids:summarization-other-aspect-based-summarization'
'task_ids:summarization--other-headline-generation'
The subject might also be used for the task_category "task_categories:summarization"
E-mail chains might be used for the task category "task_categories:dialogue-system" | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4517/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4517/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4517",
"html_url": "https://github.com/huggingface/datasets/pull/4517",
"diff_url": "https://github.com/huggingface/datasets/pull/4517.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4517.patch",
"merged_at": "2022-07-08T15:02:31"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4516 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4516/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4516/comments | https://api.github.com/repos/huggingface/datasets/issues/4516/events | https://github.com/huggingface/datasets/pull/4516 | 1,273,825,640 | PR_kwDODunzps45ykYX | 4,516 | Fix hashing for python 3.9 | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-16T16:42:31" | "2022-06-28T13:33:46" | "2022-06-28T13:23:06" | MEMBER | null | In python 3.9, pickle hashes the `glob_ids` dictionary in addition to the `globs` of a function.
Therefore the test at `tests/test_fingerprint.py::RecurseDumpTest::test_recurse_dump_for_function_with_shuffled_globals` is currently failing for python 3.9
To make hashing deterministic when the globals are not in the same order, we also need to make the order of `glob_ids` deterministic.
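For illustration, here is a minimal sketch of what making the order deterministic means (this is not the actual `datasets` implementation, and the helper name is made up): the globals a function references are sorted by name before being pickled, so dictionary insertion order can no longer change the fingerprint.
```python
import hashlib
import pickle


def deterministic_globals_digest(func):
    # Hypothetical helper: collect only the globals the function actually references,
    # sorted by name, so two identical functions whose globals were defined in a
    # different order still produce the same digest.
    names = sorted(name for name in func.__code__.co_names if name in func.__globals__)
    payload = [(name, func.__globals__[name]) for name in names]
    return hashlib.md5(pickle.dumps(payload)).hexdigest()
```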
Right now we don't have a CI to test python 3.9 but we should definitely have one. For this PR in particular I ran the tests locally using python 3.9 and they're passing now.
Fix https://github.com/huggingface/datasets/issues/4506 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4516/reactions",
"total_count": 4,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 4,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4516/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4516",
"html_url": "https://github.com/huggingface/datasets/pull/4516",
"diff_url": "https://github.com/huggingface/datasets/pull/4516.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4516.patch",
"merged_at": "2022-06-28T13:23:05"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4515 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4515/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4515/comments | https://api.github.com/repos/huggingface/datasets/issues/4515/events | https://github.com/huggingface/datasets/pull/4515 | 1,273,626,131 | PR_kwDODunzps45x5mB | 4,515 | Add uppercased versions of image file extensions for automatic module inference | {
"login": "mariosasko",
"id": 47462742,
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariosasko",
"html_url": "https://github.com/mariosasko",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-16T14:14:49" | "2022-06-16T17:21:53" | "2022-06-16T17:11:41" | CONTRIBUTOR | null | Adds the uppercased versions of the image file extensions to the supported extensions.
Another approach would be to call `.lower()` on extensions while resolving data files, but uppercased extensions are not something we want to encourage out of the box IMO unless they are commonly used (as they are in the vision domain)
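A rough sketch of the chosen approach (the actual constant lives in the module-inference code of `datasets` and may be named differently): the list of supported image extensions is simply extended with its uppercased variants.
```python
# Illustrative only: register the uppercased versions alongside the lowercase ones.
IMAGE_EXTENSIONS = [".blp", ".bmp", ".gif", ".jpg", ".jpeg", ".png", ".tiff", ".webp"]
IMAGE_EXTENSIONS.extend([ext.upper() for ext in IMAGE_EXTENSIONS])
```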
Note that there is a slight discrepancy between the image file resolution and `imagefolder` as the latter calls `.lower()` on file extensions leading to some image file extensions being ignored by the resolution but not by the loader (e.g. `pNg`). Such extensions should also be discouraged, so I'm ignoring that case too.
Fix #4514. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4515/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4515/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4515",
"html_url": "https://github.com/huggingface/datasets/pull/4515",
"diff_url": "https://github.com/huggingface/datasets/pull/4515.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4515.patch",
"merged_at": "2022-06-16T17:11:40"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4514 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4514/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4514/comments | https://api.github.com/repos/huggingface/datasets/issues/4514/events | https://github.com/huggingface/datasets/issues/4514 | 1,273,505,230 | I_kwDODunzps5L6CXO | 4,514 | Allow .JPEG as a file extension | {
"login": "DiGyt",
"id": 34550289,
"node_id": "MDQ6VXNlcjM0NTUwMjg5",
"avatar_url": "https://avatars.githubusercontent.com/u/34550289?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/DiGyt",
"html_url": "https://github.com/DiGyt",
"followers_url": "https://api.github.com/users/DiGyt/followers",
"following_url": "https://api.github.com/users/DiGyt/following{/other_user}",
"gists_url": "https://api.github.com/users/DiGyt/gists{/gist_id}",
"starred_url": "https://api.github.com/users/DiGyt/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/DiGyt/subscriptions",
"organizations_url": "https://api.github.com/users/DiGyt/orgs",
"repos_url": "https://api.github.com/users/DiGyt/repos",
"events_url": "https://api.github.com/users/DiGyt/events{/privacy}",
"received_events_url": "https://api.github.com/users/DiGyt/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | [
"Hi, thanks for reporting! I've opened a PR with the fix.",
"Wow, that was quick! Thank you very much π "
] | "2022-06-16T12:36:20" | "2022-06-20T08:18:46" | "2022-06-16T17:11:40" | NONE | null | ## Describe the bug
When loading image data, HF datasets seems to recognize `.jpg` and `.jpeg` file extensions, but not e.g. `.JPEG`. As the naming convention .JPEG is used in important datasets such as ImageNet, I would welcome it if corresponding extensions like .JPEG or .JPG were allowed.
## Steps to reproduce the bug
```python
# use bash to create 2 sham datasets with jpeg and JPEG ext
!mkdir dataset_a
!mkdir dataset_b
!wget https://upload.wikimedia.org/wikipedia/commons/7/71/Dsc_%28179253513%29.jpeg -O example_img.jpeg
!cp example_img.jpeg ./dataset_a/
!mv example_img.jpeg ./dataset_b/example_img.JPEG
from datasets import load_dataset
# working
df1 = load_dataset("./dataset_a", ignore_verifications=True)
#not working
df2 = load_dataset("./dataset_b", ignore_verifications=True)
# show
print(df1, df2)
```
## Expected results
```
DatasetDict({
train: Dataset({
features: ['image', 'label'],
num_rows: 1
})
}) DatasetDict({
train: Dataset({
features: ['image', 'label'],
num_rows: 1
})
})
```
## Actual results
```
FileNotFoundError: Unable to resolve any data file that matches '['**']' at /..PATH../dataset_b with any supported extension ['csv', 'tsv', 'json', 'jsonl', 'parquet', 'txt', 'blp', 'bmp', 'dib', 'bufr', 'cur', 'pcx', 'dcx', 'dds', 'ps', 'eps', 'fit', 'fits', 'fli', 'flc', 'ftc', 'ftu', 'gbr', 'gif', 'grib', 'h5', 'hdf', 'png', 'apng', 'jp2', 'j2k', 'jpc', 'jpf', 'jpx', 'j2c', 'icns', 'ico', 'im', 'iim', 'tif', 'tiff', 'jfif', 'jpe', 'jpg', 'jpeg', 'mpg', 'mpeg', 'msp', 'pcd', 'pxr', 'pbm', 'pgm', 'ppm', 'pnm', 'psd', 'bw', 'rgb', 'rgba', 'sgi', 'ras', 'tga', 'icb', 'vda', 'vst', 'webp', 'wmf', 'emf', 'xbm', 'xpm', 'zip']
```
I know that it can be annoying to allow seemingly arbitrary numbers of file extensions. But I think this one would be really welcome. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4514/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4514/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4513 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4513/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4513/comments | https://api.github.com/repos/huggingface/datasets/issues/4513/events | https://github.com/huggingface/datasets/pull/4513 | 1,273,450,338 | PR_kwDODunzps45xTqv | 4,513 | Update Google Cloud Storage documentation and add Azure Blob Storage example | {
"login": "alvarobartt",
"id": 36760800,
"node_id": "MDQ6VXNlcjM2NzYwODAw",
"avatar_url": "https://avatars.githubusercontent.com/u/36760800?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/alvarobartt",
"html_url": "https://github.com/alvarobartt",
"followers_url": "https://api.github.com/users/alvarobartt/followers",
"following_url": "https://api.github.com/users/alvarobartt/following{/other_user}",
"gists_url": "https://api.github.com/users/alvarobartt/gists{/gist_id}",
"starred_url": "https://api.github.com/users/alvarobartt/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/alvarobartt/subscriptions",
"organizations_url": "https://api.github.com/users/alvarobartt/orgs",
"repos_url": "https://api.github.com/users/alvarobartt/repos",
"events_url": "https://api.github.com/users/alvarobartt/events{/privacy}",
"received_events_url": "https://api.github.com/users/alvarobartt/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892861,
"node_id": "MDU6TGFiZWwxOTM1ODkyODYx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/documentation",
"name": "documentation",
"color": "0075ca",
"default": true,
"description": "Improvements or additions to documentation"
}
] | closed | false | null | [] | null | [] | "2022-06-16T11:46:09" | "2022-06-23T17:05:11" | "2022-06-23T16:54:59" | CONTRIBUTOR | null | While I was going through the π€ Datasets documentation of the Cloud storage filesystems at https://huggingface.co/docs/datasets/filesystems, I realized that the Google Cloud Storage documentation could be improved: e.g. a bullet point says "Load your dataset" when the actual call saves the dataset, an in-line code comment mentions "s3 bucket" instead of "gcs bucket", and some more in-line comments could be included.
Also, I think that mixing Google Cloud Storage documentation with AWS S3's one was a little bit confusing, so I moved all those to the end of the document under an h2 tab named "Other filesystems", with an h3 for "Google Cloud Storage".
Besides that, I was currently working with Azure Blob Storage and found out that the URL to [adlfs](https://github.com/fsspec/adlfs) was common for both filesystems Azure Blob Storage and Azure DataLake Storage, as well as the URL, which was updated even though the redirect was working fine, so I decided to group those under the same row in the column of supported filesystems.
I also took the chance to add a small documentation entry for Azure Blob Storage, analogous to the Google Cloud Storage one, as I assume that AWS S3, GCP Cloud Storage, and Azure Blob Storage are the most used cloud storage providers.
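For reference, the new Azure Blob Storage entry is along these lines (a hedged sketch: the account, key, and container names are placeholders, and the parameter names follow the filesystems docs at the time of writing):
```python
from adlfs import AzureBlobFileSystem
from datasets import load_dataset, load_from_disk

# Instantiate the Azure Blob Storage filesystem (credentials are placeholders).
fs = AzureBlobFileSystem(account_name="my_account_name", account_key="my_account_key")

# Save a dataset to a container, then load it back through the same filesystem.
ds = load_dataset("imdb", split="train")
ds.save_to_disk("abfs://my-container/imdb/train", fs=fs)
reloaded = load_from_disk("abfs://my-container/imdb/train", fs=fs)
```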
Let me know if you're OK with these changes, or whether you want me to roll back some of those! :hugs: | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4513/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4513/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4513",
"html_url": "https://github.com/huggingface/datasets/pull/4513",
"diff_url": "https://github.com/huggingface/datasets/pull/4513.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4513.patch",
"merged_at": "2022-06-23T16:54:59"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4512 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4512/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4512/comments | https://api.github.com/repos/huggingface/datasets/issues/4512/events | https://github.com/huggingface/datasets/pull/4512 | 1,273,378,129 | PR_kwDODunzps45xEDN | 4,512 | Add links to vision tasks scripts in ADD_NEW_DATASET template | {
"login": "mariosasko",
"id": 47462742,
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariosasko",
"html_url": "https://github.com/mariosasko",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-16T10:35:35" | "2022-07-08T14:07:50" | "2022-07-08T13:56:23" | CONTRIBUTOR | null | Add links to vision dataset scripts in the ADD_NEW_DATASET template. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4512/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4512/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4512",
"html_url": "https://github.com/huggingface/datasets/pull/4512",
"diff_url": "https://github.com/huggingface/datasets/pull/4512.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4512.patch",
"merged_at": "2022-07-08T13:56:23"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4511 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4511/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4511/comments | https://api.github.com/repos/huggingface/datasets/issues/4511/events | https://github.com/huggingface/datasets/pull/4511 | 1,273,336,874 | PR_kwDODunzps45w7RN | 4,511 | Support all negative values in ClassLabel | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-16T09:59:39" | "2022-07-28T16:03:27" | "2022-06-16T13:54:07" | MEMBER | null | We usually use -1 to represent a missing label, but we should also support any negative values (some users use -100 for example). This is a regression from `datasets` 2.3
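For illustration, the intended behaviour is roughly the following (a sketch, not a test from this PR; the label names are made up): a value such as -100 in a `ClassLabel` column should be treated as a missing label instead of raising `ValueError: Class label -100 less than -1`.
```python
from datasets import ClassLabel, Dataset, Features, Sequence

features = Features({"labels": Sequence(ClassLabel(names=["O", "B-ENT", "I-ENT"]))})
# -100 is commonly used to mark tokens that should be ignored by the loss.
ds = Dataset.from_dict({"labels": [[0, 1, -100, 2]]}, features=features)
```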
Fix https://github.com/huggingface/datasets/issues/4508 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4511/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4511/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4511",
"html_url": "https://github.com/huggingface/datasets/pull/4511",
"diff_url": "https://github.com/huggingface/datasets/pull/4511.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4511.patch",
"merged_at": "2022-06-16T13:54:07"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4510 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4510/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4510/comments | https://api.github.com/repos/huggingface/datasets/issues/4510/events | https://github.com/huggingface/datasets/pull/4510 | 1,273,260,396 | PR_kwDODunzps45wq6o | 4,510 | Add regression test for `ArrowWriter.write_batch` when batch is empty | {
"login": "alvarobartt",
"id": 36760800,
"node_id": "MDQ6VXNlcjM2NzYwODAw",
"avatar_url": "https://avatars.githubusercontent.com/u/36760800?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/alvarobartt",
"html_url": "https://github.com/alvarobartt",
"followers_url": "https://api.github.com/users/alvarobartt/followers",
"following_url": "https://api.github.com/users/alvarobartt/following{/other_user}",
"gists_url": "https://api.github.com/users/alvarobartt/gists{/gist_id}",
"starred_url": "https://api.github.com/users/alvarobartt/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/alvarobartt/subscriptions",
"organizations_url": "https://api.github.com/users/alvarobartt/orgs",
"repos_url": "https://api.github.com/users/alvarobartt/repos",
"events_url": "https://api.github.com/users/alvarobartt/events{/privacy}",
"received_events_url": "https://api.github.com/users/alvarobartt/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-16T08:53:51" | "2022-06-16T12:38:02" | "2022-06-16T12:28:19" | CONTRIBUTOR | null | As spotted by @cccntu in #4502, there's a logic bug in `ArrowWriter.write_batch`: although the function's docstring says empty batches are handled ("Ignores the batch if it appears to be empty, preventing a potential schema update of unknown types."), the current if-statement does not properly handle `writer.write_batch({})`, and an error is triggered.
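A minimal illustration of the expected behaviour (a sketch, not the exact regression test added here; the writer's parameter names may differ slightly):
```python
from datasets.arrow_writer import ArrowWriter

writer = ArrowWriter(path="out.arrow")
writer.write_batch({})                     # empty batch: should be silently ignored
writer.write_batch({"col_1": ["a", "b"]})  # regular batch
num_examples, num_bytes = writer.finalize()
assert num_examples == 2
```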
Also, if we add a regression test in `test_arrow_writer.py::test_write_batch` before applying the fix, the test fails when trying to write an empty batch, as follows:
```
=================================================================================== short test summary info ===================================================================================
FAILED tests/test_arrow_writer.py::test_write_batch[None-None] - ValueError: Schema and number of arrays unequal
FAILED tests/test_arrow_writer.py::test_write_batch[None-1] - ValueError: Schema and number of arrays unequal
FAILED tests/test_arrow_writer.py::test_write_batch[None-10] - ValueError: Schema and number of arrays unequal
FAILED tests/test_arrow_writer.py::test_write_batch[fields1-None] - ValueError: Schema and number of arrays unequal
FAILED tests/test_arrow_writer.py::test_write_batch[fields1-1] - ValueError: Schema and number of arrays unequal
FAILED tests/test_arrow_writer.py::test_write_batch[fields1-10] - ValueError: Schema and number of arrays unequal
FAILED tests/test_arrow_writer.py::test_write_batch[fields2-None] - ValueError: Schema and number of arrays unequal
FAILED tests/test_arrow_writer.py::test_write_batch[fields2-1] - ValueError: Schema and number of arrays unequal
FAILED tests/test_arrow_writer.py::test_write_batch[fields2-10] - ValueError: Schema and number of arrays unequal
======================================================================== 9 failed, 73 deselected, 7 warnings in 0.81s =========================================================================
```
So the batch is not ignored when empty, as `batch_examples={}` won't match the condition `if batch_examples: ...`. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4510/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4510/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4510",
"html_url": "https://github.com/huggingface/datasets/pull/4510",
"diff_url": "https://github.com/huggingface/datasets/pull/4510.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4510.patch",
"merged_at": "2022-06-16T12:28:19"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4509 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4509/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4509/comments | https://api.github.com/repos/huggingface/datasets/issues/4509/events | https://github.com/huggingface/datasets/pull/4509 | 1,273,227,760 | PR_kwDODunzps45wkDl | 4,509 | Support skipping Parquet to Arrow conversion when using Beam | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-16T08:25:38" | "2022-11-07T16:22:41" | "2022-11-07T16:22:41" | MEMBER | null | null | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4509/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4509/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4509",
"html_url": "https://github.com/huggingface/datasets/pull/4509",
"diff_url": "https://github.com/huggingface/datasets/pull/4509.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4509.patch",
"merged_at": null
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4508 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4508/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4508/comments | https://api.github.com/repos/huggingface/datasets/issues/4508/events | https://github.com/huggingface/datasets/issues/4508 | 1,272,718,921 | I_kwDODunzps5L3CZJ | 4,508 | cast_storage method from datasets.features | {
"login": "romainremyb",
"id": 67968596,
"node_id": "MDQ6VXNlcjY3OTY4NTk2",
"avatar_url": "https://avatars.githubusercontent.com/u/67968596?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/romainremyb",
"html_url": "https://github.com/romainremyb",
"followers_url": "https://api.github.com/users/romainremyb/followers",
"following_url": "https://api.github.com/users/romainremyb/following{/other_user}",
"gists_url": "https://api.github.com/users/romainremyb/gists{/gist_id}",
"starred_url": "https://api.github.com/users/romainremyb/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/romainremyb/subscriptions",
"organizations_url": "https://api.github.com/users/romainremyb/orgs",
"repos_url": "https://api.github.com/users/romainremyb/repos",
"events_url": "https://api.github.com/users/romainremyb/events{/privacy}",
"received_events_url": "https://api.github.com/users/romainremyb/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"Hi! We've recently added a check to the `ClassLabel` type to ensure the values are in the valid label range `-1, 0, ..., num_classes-1` (-1 is used for missing values). The error in your case happens only if the `labels` column is of type `Sequence(ClassLabel(...))` before the `map` call and can be avoided by calling `dataset = dataset.cast_column(\"labels\", Sequence(Value(\"int\")))` beforehand. The token-classification examples in Transformers introduce a new `labels` column, so their type is also `Sequence(Value(\"int\"))`, which doesn't lead to an error as this type unbounded. ",
"I'm fine with re-adding support for all negative values for unknown/missing labels @mariosasko, wdyt ?"
] | "2022-06-15T20:47:22" | "2022-06-16T13:54:07" | "2022-06-16T13:54:07" | NONE | null | ## Describe the bug
A bug occurs when mapping a function to a dataset object. I ran the same code with the same data yesterday and it worked just fine. It works when I run it locally on an old version of datasets.
## Steps to reproduce the bug
Steps are:
- load whatever dataset
- write a preprocessing function such as "tokenize_and_align_labels" written in https://huggingface.co/docs/transformers/tasks/token_classification
- map the function on dataset and get "ValueError: Class label -100 less than -1" from cast_storage method from datasets.features
# Sample code to reproduce the bug
```python
from transformers import AutoTokenizer


def tokenize_and_align_labels(examples):
    tokenized_inputs = tokenizer(examples["tokens"], truncation=True, is_split_into_words=True, max_length=38, padding="max_length")
    labels = []
    for i, label in enumerate(examples["labels"]):
        word_ids = tokenized_inputs.word_ids(batch_index=i)  # Map tokens to their respective word.
        previous_word_idx = None
        label_ids = []
        for word_idx in word_ids:  # Set the special tokens to -100.
            if word_idx is None:
                label_ids.append(-100)
            elif word_idx != previous_word_idx:  # Only label the first token of a given word.
                label_ids.append(label[word_idx])
            else:
                label_ids.append(-100)
            previous_word_idx = word_idx
        labels.append(label_ids)
    tokenized_inputs["labels"] = labels
    return tokenized_inputs


tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
dt = dataset.map(tokenize_and_align_labels, batched=True)
```
## Expected results
New dataset objects should load and map, as they do on older versions.
## Actual results
"ValueError: Class label -100 less than -1" from cast_storage method from datasets.features
## Environment info
Everything works fine on older installations of datasets/transformers.
The issue arises when installing datasets on Google Colab under Python 3.7.
I can't manage to find the exact output you're requiring, but the version printed is datasets-2.3.2.
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4508/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4508/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4507 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4507/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4507/comments | https://api.github.com/repos/huggingface/datasets/issues/4507/events | https://github.com/huggingface/datasets/issues/4507 | 1,272,615,932 | I_kwDODunzps5L2pP8 | 4,507 | How to let `load_dataset` return a `Dataset` instead of `DatasetDict` in customized loading script | {
"login": "liyucheng09",
"id": 27999909,
"node_id": "MDQ6VXNlcjI3OTk5OTA5",
"avatar_url": "https://avatars.githubusercontent.com/u/27999909?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/liyucheng09",
"html_url": "https://github.com/liyucheng09",
"followers_url": "https://api.github.com/users/liyucheng09/followers",
"following_url": "https://api.github.com/users/liyucheng09/following{/other_user}",
"gists_url": "https://api.github.com/users/liyucheng09/gists{/gist_id}",
"starred_url": "https://api.github.com/users/liyucheng09/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/liyucheng09/subscriptions",
"organizations_url": "https://api.github.com/users/liyucheng09/orgs",
"repos_url": "https://api.github.com/users/liyucheng09/repos",
"events_url": "https://api.github.com/users/liyucheng09/events{/privacy}",
"received_events_url": "https://api.github.com/users/liyucheng09/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892871,
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement",
"name": "enhancement",
"color": "a2eeef",
"default": true,
"description": "New feature or request"
}
] | closed | false | null | [] | null | [
"Hi @liyucheng09.\r\n\r\nUsers can pass the `split` parameter to `load_dataset`. For example, if your split name is \"train\",\r\n```python\r\nds = load_dataset(\"dataset_name\", split=\"train\")\r\n```\r\nwill return a `Dataset` instance.",
"@albertvillanova Thanks! I can't believe I didn't know this feature till now."
] | "2022-06-15T18:56:34" | "2022-06-16T10:40:08" | "2022-06-16T10:40:08" | NONE | null | If the dataset does not need splits, i.e., no training and validation split, more like a table. How can I let the `load_dataset` function return a `Dataset` object directly rather than return a `DatasetDict` object with only one key-value pair.
Or, to paraphrase the question: how can I skip the `_split_generators` step in `DatasetBuilder` so that `as_dataset` gives a single `Dataset` rather than a `List[Dataset]`?
Many thanks for any help. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4507/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4507/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4506 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4506/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4506/comments | https://api.github.com/repos/huggingface/datasets/issues/4506/events | https://github.com/huggingface/datasets/issues/4506 | 1,272,516,895 | I_kwDODunzps5L2REf | 4,506 | Failure to hash (and cache) a `.map(...)` (almost always) - using this method can produce incorrect results | {
"login": "DrMatters",
"id": 22641583,
"node_id": "MDQ6VXNlcjIyNjQxNTgz",
"avatar_url": "https://avatars.githubusercontent.com/u/22641583?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/DrMatters",
"html_url": "https://github.com/DrMatters",
"followers_url": "https://api.github.com/users/DrMatters/followers",
"following_url": "https://api.github.com/users/DrMatters/following{/other_user}",
"gists_url": "https://api.github.com/users/DrMatters/gists{/gist_id}",
"starred_url": "https://api.github.com/users/DrMatters/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/DrMatters/subscriptions",
"organizations_url": "https://api.github.com/users/DrMatters/orgs",
"repos_url": "https://api.github.com/users/DrMatters/repos",
"events_url": "https://api.github.com/users/DrMatters/events{/privacy}",
"received_events_url": "https://api.github.com/users/DrMatters/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"Important info:\r\n\r\nAs hashes are generated randomly for functions, it leads to **false identifying some results as already hashed** (mapping function is not executed after a method update) when there's a `pytorch_lightning.seed_everything(123)`",
"@lhoestq\r\nseems like quite critical stuff for me, if I'm not making a mistake",
"Hi ! Thanks for reporting. This bug seems to appear in python 3.9 using dill 3.5.1\r\n\r\nAs a workaround you can use an older version of dill:\r\n```\r\npip install \"dill<0.3.5\"\r\n```",
"installing `dill<0.3.5` after installing `datasets` by pip results in dependency conflict with the version required for `multiprocess`. It can be solved by installing `pip install datasets \"dill<0.3.5\"` (simultaneously) on a clean environment",
"This has been fixed in https://github.com/huggingface/datasets/pull/4516, we will do a new release soon to include the fix :)"
] | "2022-06-15T17:11:31" | "2023-02-16T03:14:32" | "2022-06-28T13:23:05" | NONE | null | ## Describe the bug
Sometimes I get messages about not being able to hash a method:
`Parameter 'function'=<function StupidDataModule._separate_speaker_id_from_dialogue at 0x7f1b27180d30> of the transform datasets.arrow_dataset.Dataset._map_single couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only showed once. Subsequent hashing failures won't be showed.`
Whilst the function looks like this:
```python
@staticmethod
def _separate_speaker_id_from_dialogue(example: arrow_dataset.Example):
    speaker_id, dialogue = tuple(zip(*(example["dialogue"])))
    example["speaker_id"] = speaker_id
    example["dialogue"] = dialogue
    return example
```
This is the first step in my preprocessing pipeline, but sometimes the hashing-failure message does not appear on the first step and only shows up on a later one.
When this happens, the cached data cannot be reused and all steps are recomputed from scratch.
## Steps to reproduce the bug
```python
import copy
import datasets
from datasets import arrow_dataset
def main():
    dataset = datasets.load_dataset("blended_skill_talk")
    res = dataset.map(method)
    print(res)


def method(example: arrow_dataset.Example):
    example['previous_utterance_copy'] = copy.deepcopy(example['previous_utterance'])
    return example


if __name__ == '__main__':
    main()
```
Run with:
```
python -m reproduce_error
```
## Expected results
Dataset is mapped and cached correctly.
## Actual results
The code outputs this at some point:
`Parameter 'function'=<function method at 0x7faa83d2a160> of the transform datasets.arrow_dataset.Dataset._map_single couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only showed once. Subsequent hashing failures won't be showed.`
## Environment info
<!-- You can run the command `datasets-cli env` and copy-and-paste its output below. -->
- `datasets` version:
- Platform: Ubuntu 20.04.3
- Python version: 3.9.12
- PyArrow version: 8.0.0
- Datasets version: 2.3.1
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4506/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4506/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4505 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4505/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4505/comments | https://api.github.com/repos/huggingface/datasets/issues/4505/events | https://github.com/huggingface/datasets/pull/4505 | 1,272,477,226 | PR_kwDODunzps45uH-o | 4,505 | Fix double dots in data files | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-15T16:31:04" | "2022-06-15T17:15:58" | "2022-06-15T17:05:53" | MEMBER | null | As mentioned in https://github.com/huggingface/transformers/pull/17715 `data_files` can't find a file if the path contains double dots `/../`. This has been introduced in https://github.com/huggingface/datasets/pull/4412, by trying to ignore hidden files and directories (i.e. if they start with a dot)
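For context, a minimal sketch of the failure mode (the file layout is made up, not taken from the PR):

```python
from datasets import load_dataset

# Before this fix, a path containing "/../" was mistaken for a hidden file/directory
# and silently filtered out, so the loader reported that no data files were found.
dataset = load_dataset("csv", data_files="data/subdir/../train.csv")
```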
I fixed this and added a test
cc @sgugger @ydshieh | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4505/reactions",
"total_count": 3,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 3,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4505/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4505",
"html_url": "https://github.com/huggingface/datasets/pull/4505",
"diff_url": "https://github.com/huggingface/datasets/pull/4505.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4505.patch",
"merged_at": "2022-06-15T17:05:53"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4503 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4503/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4503/comments | https://api.github.com/repos/huggingface/datasets/issues/4503/events | https://github.com/huggingface/datasets/pull/4503 | 1,272,367,055 | PR_kwDODunzps45twLR | 4,503 | Refactor and add metadata to fever dataset | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-15T14:59:47" | "2022-07-06T11:54:15" | "2022-07-06T11:41:30" | MEMBER | null | Related to: #4452 and #3792. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4503/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4503/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4503",
"html_url": "https://github.com/huggingface/datasets/pull/4503",
"diff_url": "https://github.com/huggingface/datasets/pull/4503.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4503.patch",
"merged_at": "2022-07-06T11:41:30"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4502 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4502/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4502/comments | https://api.github.com/repos/huggingface/datasets/issues/4502/events | https://github.com/huggingface/datasets/issues/4502 | 1,272,353,700 | I_kwDODunzps5L1pOk | 4,502 | Logic bug in arrow_writer? | {
"login": "cccntu",
"id": 31893406,
"node_id": "MDQ6VXNlcjMxODkzNDA2",
"avatar_url": "https://avatars.githubusercontent.com/u/31893406?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/cccntu",
"html_url": "https://github.com/cccntu",
"followers_url": "https://api.github.com/users/cccntu/followers",
"following_url": "https://api.github.com/users/cccntu/following{/other_user}",
"gists_url": "https://api.github.com/users/cccntu/gists{/gist_id}",
"starred_url": "https://api.github.com/users/cccntu/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/cccntu/subscriptions",
"organizations_url": "https://api.github.com/users/cccntu/orgs",
"repos_url": "https://api.github.com/users/cccntu/repos",
"events_url": "https://api.github.com/users/cccntu/events{/privacy}",
"received_events_url": "https://api.github.com/users/cccntu/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [
"Hi @cccntu you're right, as when `batch_examples={}` the current if-statement won't be triggered as the condition won't be satisfied, I'll prepare a PR to address it as well as add the regression tests so that this issue is handled properly.",
"Hi @alvarobartt ,\r\nThanks for answering. Do you know when and why an empty batch is passed to this function? This only happened to me when processing with multiple workers, while chunking examples, I think.",
"> Hi @alvarobartt , Thanks for answering. Do you know when and why an empty batch is passed to this function? This only happened to me when processing with multiple workers, while chunking examples, I think.\r\n\r\nSo it depends on how you're actually chunking the data as if you're not handling empty chunks `batch_examples={}` or `batch_examples=None`, you may end up running into this issue. So you could check the chunks before you actually call `ArrowWriter.write_batch`, but anyway the fix you proposed I think improves the logic of `write_batch` to avoid running into these issues.",
"Thanks, I added a if-print and I found it does return an empty examples in the chunking function that is passed to `.map()`.",
"Hi ! We consider an empty batch to look like this:\r\n```python\r\nempty_batch = {\r\n \"column_1\": [],\r\n \"column_2\": [],\r\n ...\r\n}\r\n```\r\n\r\nWhile `{}` corresponds to a batch with no columns.\r\n\r\nTherefore calling this code should fail, because the two batches don't have the same columns:\r\n```python\r\nwriter.write_batch({\"a\": [1, 2, 3]})\r\nwriter.write_batch({})\r\n```\r\n\r\nIf you want to write an empty batch, you should do this instead:\r\n```python\r\nwriter.write_batch({\"a\": [1, 2, 3]})\r\nwriter.write_batch({\"a\": []})\r\n```",
"Makes sense, then the if-statement should remain the same or is it better to handle both cases separately using `if not batch_examples or len(next(iter(batch_examples.values()))) == 0: ...`?\r\n\r\nUpdating the regressions tests with an empty batch formatted as `{\"col_1\": [], \"col_2\": []}` instead of `{}` works fine with the current if, and also with the one proposed by @cccntu.",
"> Makes sense, then the if-statement should remain the same or is it better to handle both cases separately using if not batch_examples or len(next(iter(batch_examples.values()))) == 0: ...?\r\n\r\nThere's a check later in the code that makes sure that the columns are the right ones, so I don't think we need to check for `{}` here\r\n\r\nIn particular the check `if not batch_examples or len(next(iter(batch_examples.values()))) == 0:` doesn't raise an error while it should, that why the old `if` is fine IMO\r\n\r\n> Updating the regressions tests with an empty batch formatted as {\"col_1\": [], \"col_2\": []} instead of {} works fine with the current if, and also with the one proposed by @cccntu.\r\n\r\nCool ! If you want you can update your PR to add the regression tests, to make sure that `{\"col_1\": [], \"col_2\": []}` works but not `{}`",
"Great thanks for the response! So I'll just add that regression test and remove the current if-statement.",
"Hi @lhoestq ,\r\n\r\nThanks for your explanation. Now I get it that `{}` means the columns are different. But wouldn't it be nice if the code can ignore it, like it ignores `{\"a\": []}`?\r\n\r\n\r\n--- \r\nBTW, \r\n> There's a check later in the code that makes sure that the columns are the right ones, so I don't think we need to check for {} here\r\n\r\nI remember the error happens around here:\r\nhttps://github.com/huggingface/datasets/blob/88a902d6474fae8d793542d57a4f3b0d187f3c5b/src/datasets/arrow_writer.py#L506-L507\r\nThe error says something like `arrays` and `schema` doesn't have the same length. And it's not very clear I passed a `{}`.\r\n\r\nedit: actual error message\r\n```\r\nFile \"site-packages/datasets/arrow_writer.py\", line 595, in write_batch\r\n pa_table = pa.Table.from_arrays(arrays, schema=schema)\r\n File \"pyarrow/table.pxi\", line 3557, in pyarrow.lib.Table.from_arrays\r\n File \"pyarrow/table.pxi\", line 1401, in pyarrow.lib._sanitize_arrays\r\nValueError: Schema and number of arrays unequal\r\n```",
"> But wouldn't it be nice if the code can ignore it, like it ignores {\"a\": []}?\r\n\r\nI think it would make things confusing because it doesn't follow our definition of a batch: \"the columns of a batch = the keys of the dict\". It would probably break certain behaviors as well. For example if you remove all the columns of a dataset (using `.remove_colums(...)` or `.map(..., remove_columns=...)`), the writer has to write 0 columns, and currently the only way to tell the writer to do so using `write_batch` is to pass `{}`.\r\n\r\n> The error says something like arrays and schema doesn't have the same length. And it's not very clear I passed a {}.\r\n\r\nYea the message can actually be improved indeed, it's definitely not clear. Maybe we can add a line right before the call `pa.Table.from_arrays` to make sure the keys of the batch match the field names of the schema"
] | "2022-06-15T14:50:00" | "2022-06-18T15:15:51" | "2022-06-18T15:15:51" | CONTRIBUTOR | null | https://github.com/huggingface/datasets/blob/88a902d6474fae8d793542d57a4f3b0d187f3c5b/src/datasets/arrow_writer.py#L475-L488
I got an error, and I found it's caused by `batch_examples` being `{}`. I wonder if the code should be as follows:
```diff
- if batch_examples and len(next(iter(batch_examples.values()))) == 0:
+ if not batch_examples or len(next(iter(batch_examples.values()))) == 0:
      return
```
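For context, a rough illustration of why the current check misses this case (my own example, not from the codebase):

```python
# A batch with zero rows still keeps its column keys:
empty_batch = {"col_1": [], "col_2": []}  # truthy dict -> the existing `if` returns early
# A batch with no columns at all:
no_columns = {}  # falsy dict -> the existing `if` is skipped, writing proceeds and fails later
```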
@lhoestq | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4502/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4502/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4501 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4501/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4501/comments | https://api.github.com/repos/huggingface/datasets/issues/4501/events | https://github.com/huggingface/datasets/pull/4501 | 1,272,300,646 | PR_kwDODunzps45th2M | 4,501 | Corrected broken links in doc | {
"login": "clefourrier",
"id": 22726840,
"node_id": "MDQ6VXNlcjIyNzI2ODQw",
"avatar_url": "https://avatars.githubusercontent.com/u/22726840?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/clefourrier",
"html_url": "https://github.com/clefourrier",
"followers_url": "https://api.github.com/users/clefourrier/followers",
"following_url": "https://api.github.com/users/clefourrier/following{/other_user}",
"gists_url": "https://api.github.com/users/clefourrier/gists{/gist_id}",
"starred_url": "https://api.github.com/users/clefourrier/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/clefourrier/subscriptions",
"organizations_url": "https://api.github.com/users/clefourrier/orgs",
"repos_url": "https://api.github.com/users/clefourrier/repos",
"events_url": "https://api.github.com/users/clefourrier/events{/privacy}",
"received_events_url": "https://api.github.com/users/clefourrier/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-15T14:12:17" | "2022-06-15T15:11:05" | "2022-06-15T15:00:56" | CONTRIBUTOR | null | null | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4501/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4501/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4501",
"html_url": "https://github.com/huggingface/datasets/pull/4501",
"diff_url": "https://github.com/huggingface/datasets/pull/4501.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4501.patch",
"merged_at": "2022-06-15T15:00:56"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4500 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4500/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4500/comments | https://api.github.com/repos/huggingface/datasets/issues/4500/events | https://github.com/huggingface/datasets/pull/4500 | 1,272,281,992 | PR_kwDODunzps45tdxk | 4,500 | Add `concatenate_datasets` for iterable datasets | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-15T13:58:50" | "2022-06-28T21:25:39" | "2022-06-28T21:15:04" | MEMBER | null | `concatenate_datasets` currently only supports lists of `datasets.Dataset`, not lists of `datasets.IterableDataset` like `interleave_datasets`
Fix https://github.com/huggingface/datasets/issues/2564
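A usage sketch of what this enables (the dataset/split names are just placeholders):

```python
from datasets import load_dataset, concatenate_datasets

ds1 = load_dataset("squad", split="train", streaming=True)        # IterableDataset
ds2 = load_dataset("squad", split="validation", streaming=True)   # IterableDataset
combined = concatenate_datasets([ds1, ds2])  # previously only worked for map-style Dataset objects
print(next(iter(combined)))
```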
I also moved `_interleave_map_style_datasets` from combine.py to arrow_dataset.py, since the logic depends a lot on the `Dataset` object internals
And I moved `concatenate_datasets` from arrow_dataset.py to combine.py to have it with `interleave_datasets` (though it's also kept in the arrow_dataset module for backward compatibility for now) | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4500/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4500/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4500",
"html_url": "https://github.com/huggingface/datasets/pull/4500",
"diff_url": "https://github.com/huggingface/datasets/pull/4500.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4500.patch",
"merged_at": "2022-06-28T21:15:04"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4499 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4499/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4499/comments | https://api.github.com/repos/huggingface/datasets/issues/4499/events | https://github.com/huggingface/datasets/pull/4499 | 1,272,118,162 | PR_kwDODunzps45s6Jh | 4,499 | fix ETT m1/m2 test/val dataset | {
"login": "kashif",
"id": 8100,
"node_id": "MDQ6VXNlcjgxMDA=",
"avatar_url": "https://avatars.githubusercontent.com/u/8100?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/kashif",
"html_url": "https://github.com/kashif",
"followers_url": "https://api.github.com/users/kashif/followers",
"following_url": "https://api.github.com/users/kashif/following{/other_user}",
"gists_url": "https://api.github.com/users/kashif/gists{/gist_id}",
"starred_url": "https://api.github.com/users/kashif/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/kashif/subscriptions",
"organizations_url": "https://api.github.com/users/kashif/orgs",
"repos_url": "https://api.github.com/users/kashif/repos",
"events_url": "https://api.github.com/users/kashif/events{/privacy}",
"received_events_url": "https://api.github.com/users/kashif/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-15T11:51:02" | "2022-06-15T14:55:56" | "2022-06-15T14:45:13" | CONTRIBUTOR | null | https://huggingface.co/datasets/ett/discussions/1 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4499/reactions",
"total_count": 1,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 1,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4499/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4499",
"html_url": "https://github.com/huggingface/datasets/pull/4499",
"diff_url": "https://github.com/huggingface/datasets/pull/4499.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4499.patch",
"merged_at": "2022-06-15T14:45:12"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4498 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4498/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4498/comments | https://api.github.com/repos/huggingface/datasets/issues/4498/events | https://github.com/huggingface/datasets/issues/4498 | 1,272,100,549 | I_kwDODunzps5L0rbF | 4,498 | WER and CER > 1 | {
"login": "sadrasabouri",
"id": 43045767,
"node_id": "MDQ6VXNlcjQzMDQ1NzY3",
"avatar_url": "https://avatars.githubusercontent.com/u/43045767?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/sadrasabouri",
"html_url": "https://github.com/sadrasabouri",
"followers_url": "https://api.github.com/users/sadrasabouri/followers",
"following_url": "https://api.github.com/users/sadrasabouri/following{/other_user}",
"gists_url": "https://api.github.com/users/sadrasabouri/gists{/gist_id}",
"starred_url": "https://api.github.com/users/sadrasabouri/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/sadrasabouri/subscriptions",
"organizations_url": "https://api.github.com/users/sadrasabouri/orgs",
"repos_url": "https://api.github.com/users/sadrasabouri/repos",
"events_url": "https://api.github.com/users/sadrasabouri/events{/privacy}",
"received_events_url": "https://api.github.com/users/sadrasabouri/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | [
"WER can have values bigger than 1.0, this is expected when there are too many insertions\r\n\r\nFrom [wikipedia](https://en.wikipedia.org/wiki/Word_error_rate):\r\n> Note that since N is the number of words in the reference, the word error rate can be larger than 1.0"
] | "2022-06-15T11:35:12" | "2022-06-15T16:38:05" | "2022-06-15T16:38:05" | NONE | null | ## Describe the bug
It seems that in some cases where the `prediction` is longer than the `reference`, the word/character error rate can be higher than 1, which is a bit odd.
If this is a real bug, I think I can solve it with a PR changing [this](https://github.com/huggingface/datasets/blob/master/metrics/wer/wer.py#L105) line to:
```python
return min(incorrect / total, 1.0)
```
## Steps to reproduce the bug
```python
from datasets import load_metric
wer = load_metric("wer")
wer_value = wer.compute(predictions=["Hi World vka"], references=["Hello"])
print(wer_value)
```
## Expected results
```
1.0
```
## Actual results
```
3.0
```
## Environment info
<!-- You can run the command `datasets-cli env` and copy-and-paste its output below. -->
- `datasets` version: 2.3.0
- Platform: Linux-5.4.188+-x86_64-with-Ubuntu-18.04-bionic
- Python version: 3.7.13
- PyArrow version: 6.0.1
- Pandas version: 1.3.5 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4498/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4498/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4497 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4497/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4497/comments | https://api.github.com/repos/huggingface/datasets/issues/4497/events | https://github.com/huggingface/datasets/pull/4497 | 1,271,964,338 | PR_kwDODunzps45sYns | 4,497 | Re-add download_manager module in utils | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-15T09:44:33" | "2022-06-15T10:33:28" | "2022-06-15T10:23:44" | MEMBER | null | https://github.com/huggingface/datasets/pull/4384 moved `datasets.utils.download_manager` to `datasets.download.download_manager`
This breaks `evaluate`, which imports `DownloadMode` from `datasets.utils.download_manager`.
This PR re-adds `datasets.utils.download_manager` without circular imports.
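A minimal sketch of the kind of shim this implies (illustrative only, not necessarily the merged code):

```python
# datasets/utils/download_manager.py -- deprecated location kept for backward compatibility
from datasets.download.download_manager import DownloadManager, DownloadMode  # noqa: F401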
We could also show a message that says that accessing it is deprecated, but I think we can do this in a subsequent PR, and just focus on doing a patch release for now | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4497/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4497/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4497",
"html_url": "https://github.com/huggingface/datasets/pull/4497",
"diff_url": "https://github.com/huggingface/datasets/pull/4497.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4497.patch",
"merged_at": "2022-06-15T10:23:44"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4496 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4496/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4496/comments | https://api.github.com/repos/huggingface/datasets/issues/4496/events | https://github.com/huggingface/datasets/pull/4496 | 1,271,945,704 | PR_kwDODunzps45sUnW | 4,496 | Replace `assertEqual` with `assertTupleEqual` in unit tests for verbosity | {
"login": "alvarobartt",
"id": 36760800,
"node_id": "MDQ6VXNlcjM2NzYwODAw",
"avatar_url": "https://avatars.githubusercontent.com/u/36760800?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/alvarobartt",
"html_url": "https://github.com/alvarobartt",
"followers_url": "https://api.github.com/users/alvarobartt/followers",
"following_url": "https://api.github.com/users/alvarobartt/following{/other_user}",
"gists_url": "https://api.github.com/users/alvarobartt/gists{/gist_id}",
"starred_url": "https://api.github.com/users/alvarobartt/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/alvarobartt/subscriptions",
"organizations_url": "https://api.github.com/users/alvarobartt/orgs",
"repos_url": "https://api.github.com/users/alvarobartt/repos",
"events_url": "https://api.github.com/users/alvarobartt/events{/privacy}",
"received_events_url": "https://api.github.com/users/alvarobartt/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-15T09:29:16" | "2022-07-07T17:06:51" | "2022-07-07T16:55:48" | CONTRIBUTOR | null | As detailed in #4419 and as suggested by @mariosasko, we could replace the `assertEqual` assertions with `assertTupleEqual` when the assertion is between Tuples, in order to make the tests more verbose. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4496/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4496/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4496",
"html_url": "https://github.com/huggingface/datasets/pull/4496",
"diff_url": "https://github.com/huggingface/datasets/pull/4496.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4496.patch",
"merged_at": "2022-07-07T16:55:48"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4495 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4495/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4495/comments | https://api.github.com/repos/huggingface/datasets/issues/4495/events | https://github.com/huggingface/datasets/pull/4495 | 1,271,851,025 | PR_kwDODunzps45sAgO | 4,495 | Fix patching module that doesn't exist | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-15T08:17:50" | "2022-06-15T16:40:49" | "2022-06-15T08:54:09" | MEMBER | null | Reported in https://github.com/huggingface/huggingface_hub/runs/6894703718?check_suite_focus=true
When trying to patch `scipy.io.loadmat`:
```python
ModuleNotFoundError: No module named 'scipy'
```
Instead, it should not raise an error and should simply do nothing.
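A minimal sketch of the intended behavior (the names and structure are assumptions, not the actual patch):

```python
import importlib

def safe_patch(module_name: str, attr: str, new_obj) -> None:
    try:
        module = importlib.import_module(module_name)  # e.g. "scipy.io"
    except ModuleNotFoundError:
        return  # optional dependency not installed: do nothing instead of raising
    setattr(module, attr, new_obj)  # e.g. replace `loadmat` with a streaming-aware wrapper
```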
Bug introduced by #4375
Fix https://github.com/huggingface/datasets/issues/4494 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4495/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4495/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4495",
"html_url": "https://github.com/huggingface/datasets/pull/4495",
"diff_url": "https://github.com/huggingface/datasets/pull/4495.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4495.patch",
"merged_at": "2022-06-15T08:54:09"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4494 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4494/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4494/comments | https://api.github.com/repos/huggingface/datasets/issues/4494/events | https://github.com/huggingface/datasets/issues/4494 | 1,271,850,599 | I_kwDODunzps5LzuZn | 4,494 | Patching fails for modules that are not installed or don't exist | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-15T08:17:29" | "2022-06-15T08:54:09" | "2022-06-15T08:54:09" | MEMBER | null | Reported in https://github.com/huggingface/huggingface_hub/runs/6894703718?check_suite_focus=true
When trying to patch `scipy.io.loadmat`:
```python
ModuleNotFoundError: No module named 'scipy'
```
Instead, it should not raise an error and should simply do nothing.
We use patching to extend such functions to support remote URLs and work in streaming mode | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4494/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4494/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4493 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4493/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4493/comments | https://api.github.com/repos/huggingface/datasets/issues/4493/events | https://github.com/huggingface/datasets/pull/4493 | 1,271,306,385 | PR_kwDODunzps45qL7J | 4,493 | Add `@transmit_format` in `flatten` | {
"login": "alvarobartt",
"id": 36760800,
"node_id": "MDQ6VXNlcjM2NzYwODAw",
"avatar_url": "https://avatars.githubusercontent.com/u/36760800?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/alvarobartt",
"html_url": "https://github.com/alvarobartt",
"followers_url": "https://api.github.com/users/alvarobartt/followers",
"following_url": "https://api.github.com/users/alvarobartt/following{/other_user}",
"gists_url": "https://api.github.com/users/alvarobartt/gists{/gist_id}",
"starred_url": "https://api.github.com/users/alvarobartt/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/alvarobartt/subscriptions",
"organizations_url": "https://api.github.com/users/alvarobartt/orgs",
"repos_url": "https://api.github.com/users/alvarobartt/repos",
"events_url": "https://api.github.com/users/alvarobartt/events{/privacy}",
"received_events_url": "https://api.github.com/users/alvarobartt/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-14T20:09:09" | "2022-09-27T11:37:25" | "2022-09-27T10:48:54" | CONTRIBUTOR | null | As suggested by @mariosasko in https://github.com/huggingface/datasets/pull/4411, we should include the `@transmit_format` decorator to `flatten`, `rename_column`, and `rename_columns` so as to ensure that the value of `_format_columns` in an `ArrowDataset` is properly updated.
**Edit**: according to @mariosasko comment below, the decorator `@transmit_format` doesn't handle column renaming, so it's done manually for those instead. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4493/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4493/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4493",
"html_url": "https://github.com/huggingface/datasets/pull/4493",
"diff_url": "https://github.com/huggingface/datasets/pull/4493.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4493.patch",
"merged_at": null
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4492 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4492/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4492/comments | https://api.github.com/repos/huggingface/datasets/issues/4492/events | https://github.com/huggingface/datasets/pull/4492 | 1,271,112,497 | PR_kwDODunzps45pktu | 4,492 | Pin the revision in imagenet download links | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-14T17:15:17" | "2022-06-14T17:35:13" | "2022-06-14T17:25:45" | MEMBER | null | Use the commit sha in the data files URLs of the imagenet-1k download script, in case we want to restructure the data files in the future. For example we may split it into many more shards for better paralellism.
cc @mariosasko | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4492/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4492/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4492",
"html_url": "https://github.com/huggingface/datasets/pull/4492",
"diff_url": "https://github.com/huggingface/datasets/pull/4492.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4492.patch",
"merged_at": "2022-06-14T17:25:45"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4491 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4491/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4491/comments | https://api.github.com/repos/huggingface/datasets/issues/4491/events | https://github.com/huggingface/datasets/issues/4491 | 1,270,803,822 | I_kwDODunzps5Lvu1u | 4,491 | Dataset Viewer issue for Pavithree/test | {
"login": "Pavithree",
"id": 23344465,
"node_id": "MDQ6VXNlcjIzMzQ0NDY1",
"avatar_url": "https://avatars.githubusercontent.com/u/23344465?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Pavithree",
"html_url": "https://github.com/Pavithree",
"followers_url": "https://api.github.com/users/Pavithree/followers",
"following_url": "https://api.github.com/users/Pavithree/following{/other_user}",
"gists_url": "https://api.github.com/users/Pavithree/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Pavithree/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Pavithree/subscriptions",
"organizations_url": "https://api.github.com/users/Pavithree/orgs",
"repos_url": "https://api.github.com/users/Pavithree/repos",
"events_url": "https://api.github.com/users/Pavithree/events{/privacy}",
"received_events_url": "https://api.github.com/users/Pavithree/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 3470211881,
"node_id": "LA_kwDODunzps7O1zsp",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer",
"name": "dataset-viewer",
"color": "E5583E",
"default": false,
"description": "Related to the dataset viewer on huggingface.co"
}
] | closed | false | {
"login": "severo",
"id": 1676121,
"node_id": "MDQ6VXNlcjE2NzYxMjE=",
"avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/severo",
"html_url": "https://github.com/severo",
"followers_url": "https://api.github.com/users/severo/followers",
"following_url": "https://api.github.com/users/severo/following{/other_user}",
"gists_url": "https://api.github.com/users/severo/gists{/gist_id}",
"starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/severo/subscriptions",
"organizations_url": "https://api.github.com/users/severo/orgs",
"repos_url": "https://api.github.com/users/severo/repos",
"events_url": "https://api.github.com/users/severo/events{/privacy}",
"received_events_url": "https://api.github.com/users/severo/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "severo",
"id": 1676121,
"node_id": "MDQ6VXNlcjE2NzYxMjE=",
"avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/severo",
"html_url": "https://github.com/severo",
"followers_url": "https://api.github.com/users/severo/followers",
"following_url": "https://api.github.com/users/severo/following{/other_user}",
"gists_url": "https://api.github.com/users/severo/gists{/gist_id}",
"starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/severo/subscriptions",
"organizations_url": "https://api.github.com/users/severo/orgs",
"repos_url": "https://api.github.com/users/severo/repos",
"events_url": "https://api.github.com/users/severo/events{/privacy}",
"received_events_url": "https://api.github.com/users/severo/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"This issue can be resolved according to this post https://stackoverflow.com/questions/70566660/parquet-with-null-columns-on-pyarrow. It looks like first data entry in the json file must not have any null values as pyarrow uses this first file to infer schema for entire dataset."
] | "2022-06-14T13:23:10" | "2022-06-14T14:37:21" | "2022-06-14T14:34:33" | NONE | null | ### Link
https://huggingface.co/datasets/Pavithree/test
### Description
I have extracted a subset of the original eli5 dataset found on Hugging Face. However, while loading the dataset, it throws `ArrowNotImplementedError: Unsupported cast from string to null using function cast_null`. Is there anything missing on my end? Kindly help.
### Owner
_No response_ | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4491/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4491/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4489 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4489/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4489/comments | https://api.github.com/repos/huggingface/datasets/issues/4489/events | https://github.com/huggingface/datasets/pull/4489 | 1,270,706,195 | PR_kwDODunzps45oONF | 4,489 | Add SV-Ident dataset | {
"login": "e-tornike",
"id": 20404466,
"node_id": "MDQ6VXNlcjIwNDA0NDY2",
"avatar_url": "https://avatars.githubusercontent.com/u/20404466?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/e-tornike",
"html_url": "https://github.com/e-tornike",
"followers_url": "https://api.github.com/users/e-tornike/followers",
"following_url": "https://api.github.com/users/e-tornike/following{/other_user}",
"gists_url": "https://api.github.com/users/e-tornike/gists{/gist_id}",
"starred_url": "https://api.github.com/users/e-tornike/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/e-tornike/subscriptions",
"organizations_url": "https://api.github.com/users/e-tornike/orgs",
"repos_url": "https://api.github.com/users/e-tornike/repos",
"events_url": "https://api.github.com/users/e-tornike/events{/privacy}",
"received_events_url": "https://api.github.com/users/e-tornike/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-14T12:09:00" | "2022-06-20T08:48:26" | "2022-06-20T08:37:27" | NONE | null | null | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4489/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4489/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4489",
"html_url": "https://github.com/huggingface/datasets/pull/4489",
"diff_url": "https://github.com/huggingface/datasets/pull/4489.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4489.patch",
"merged_at": null
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4488 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4488/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4488/comments | https://api.github.com/repos/huggingface/datasets/issues/4488/events | https://github.com/huggingface/datasets/pull/4488 | 1,270,613,857 | PR_kwDODunzps45n6Ja | 4,488 | Update PASS dataset version | {
"login": "mariosasko",
"id": 47462742,
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariosasko",
"html_url": "https://github.com/mariosasko",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-14T10:47:14" | "2022-06-14T16:41:55" | "2022-06-14T16:32:28" | CONTRIBUTOR | null | Update the PASS dataset to version v3 (the newest one) from the [version history](https://github.com/yukimasano/PASS/blob/main/version_history.txt).
PS: The older versions are not exposed as configs in the script because v1 was removed from Zenodo, and the same thing will probably happen to v2. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4488/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4488/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4488",
"html_url": "https://github.com/huggingface/datasets/pull/4488",
"diff_url": "https://github.com/huggingface/datasets/pull/4488.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4488.patch",
"merged_at": "2022-06-14T16:32:28"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4487 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4487/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4487/comments | https://api.github.com/repos/huggingface/datasets/issues/4487/events | https://github.com/huggingface/datasets/pull/4487 | 1,270,525,163 | PR_kwDODunzps45nm5J | 4,487 | Support streaming UDHR dataset | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-14T09:33:33" | "2022-06-15T05:09:22" | "2022-06-15T04:59:49" | MEMBER | null | This PR:
- Adds support for streaming UDHR dataset
- Adds the BCP 47 language code as feature | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4487/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4487/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4487",
"html_url": "https://github.com/huggingface/datasets/pull/4487",
"diff_url": "https://github.com/huggingface/datasets/pull/4487.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4487.patch",
"merged_at": "2022-06-15T04:59:49"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4486 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4486/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4486/comments | https://api.github.com/repos/huggingface/datasets/issues/4486/events | https://github.com/huggingface/datasets/pull/4486 | 1,269,518,084 | PR_kwDODunzps45kP88 | 4,486 | Add CCAgT dataset | {
"login": "johnnv1",
"id": 20444345,
"node_id": "MDQ6VXNlcjIwNDQ0MzQ1",
"avatar_url": "https://avatars.githubusercontent.com/u/20444345?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/johnnv1",
"html_url": "https://github.com/johnnv1",
"followers_url": "https://api.github.com/users/johnnv1/followers",
"following_url": "https://api.github.com/users/johnnv1/following{/other_user}",
"gists_url": "https://api.github.com/users/johnnv1/gists{/gist_id}",
"starred_url": "https://api.github.com/users/johnnv1/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/johnnv1/subscriptions",
"organizations_url": "https://api.github.com/users/johnnv1/orgs",
"repos_url": "https://api.github.com/users/johnnv1/repos",
"events_url": "https://api.github.com/users/johnnv1/events{/privacy}",
"received_events_url": "https://api.github.com/users/johnnv1/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-13T14:20:19" | "2022-07-04T14:37:03" | "2022-07-04T14:25:45" | NONE | null | As described in #4075
I could not generate the dummy data. Also, the data repository does not provide the split IDs, but I copied the functions that produce the correct data split. In summary, to get a better distribution, the data in this dataset should be split based on the number of NORs in each image. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4486/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4486/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4486",
"html_url": "https://github.com/huggingface/datasets/pull/4486",
"diff_url": "https://github.com/huggingface/datasets/pull/4486.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4486.patch",
"merged_at": null
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4485 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4485/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4485/comments | https://api.github.com/repos/huggingface/datasets/issues/4485/events | https://github.com/huggingface/datasets/pull/4485 | 1,269,463,054 | PR_kwDODunzps45kD7A | 4,485 | Fix cast to null | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-13T13:44:32" | "2022-06-14T13:43:54" | "2022-06-14T13:34:14" | MEMBER | null | It currently fails with `ArrowNotImplementedError` instead of `TypeError` when one tries to cast integer to null type.
Because if this, type inference breaks when one replaces null values with integers in `map` (it first tries to cast to the previous type before inferring the new type).
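A minimal repro sketch of the broken path (my own example, not taken from the PR):

```python
from datasets import Dataset

ds = Dataset.from_dict({"a": [None, None]})  # column "a" is inferred with the null type
ds = ds.map(lambda example: {"a": 1})        # replace the nulls with integers
# Before the fix, the writer's attempted cast of the new values back to the null type
# surfaced as ArrowNotImplementedError instead of the TypeError that lets type
# inference fall back to the new (int64) type.
```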
Fix https://github.com/huggingface/datasets/issues/4483 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4485/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4485/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4485",
"html_url": "https://github.com/huggingface/datasets/pull/4485",
"diff_url": "https://github.com/huggingface/datasets/pull/4485.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4485.patch",
"merged_at": "2022-06-14T13:34:14"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4484 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4484/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4484/comments | https://api.github.com/repos/huggingface/datasets/issues/4484/events | https://github.com/huggingface/datasets/pull/4484 | 1,269,383,811 | PR_kwDODunzps45jywZ | 4,484 | Better ImportError message when a dataset script dependency is missing | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-13T12:44:37" | "2022-07-08T14:30:44" | "2022-06-13T13:50:47" | MEMBER | null | When a dependency is missing for a dataset script, an ImportError message is shown, with a tip to install the missing dependencies. This message is not ideal at the moment: it may show duplicate dependencies, and is not very readable.
I improved it from
```
ImportError: To be able to use bigbench, you need to install the following dependencies['bigbench', 'bigbench', 'bigbench', 'bigbench'] using 'pip install "bigbench @ https://storage.googleapis.com/public_research_data/bigbench/bigbench-0.0.1.tar.gz" bigbench bigbench bigbench' for instance'
```
to
```
ImportError: To be able to use bigbench, you need to install the following dependency: bigbench.
Please install it using 'pip install "bigbench @ https://storage.googleapis.com/public_research_data/bigbench/bigbench-0.0.1.tar.gz"' for instance'
``` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4484/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4484/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4484",
"html_url": "https://github.com/huggingface/datasets/pull/4484",
"diff_url": "https://github.com/huggingface/datasets/pull/4484.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4484.patch",
"merged_at": "2022-06-13T13:50:47"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4483 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4483/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4483/comments | https://api.github.com/repos/huggingface/datasets/issues/4483/events | https://github.com/huggingface/datasets/issues/4483 | 1,269,253,840 | I_kwDODunzps5Lp0bQ | 4,483 | Dataset.map throws pyarrow.lib.ArrowNotImplementedError when converting from list of empty lists | {
"login": "sanderland",
"id": 48946947,
"node_id": "MDQ6VXNlcjQ4OTQ2OTQ3",
"avatar_url": "https://avatars.githubusercontent.com/u/48946947?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/sanderland",
"html_url": "https://github.com/sanderland",
"followers_url": "https://api.github.com/users/sanderland/followers",
"following_url": "https://api.github.com/users/sanderland/following{/other_user}",
"gists_url": "https://api.github.com/users/sanderland/gists{/gist_id}",
"starred_url": "https://api.github.com/users/sanderland/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/sanderland/subscriptions",
"organizations_url": "https://api.github.com/users/sanderland/orgs",
"repos_url": "https://api.github.com/users/sanderland/repos",
"events_url": "https://api.github.com/users/sanderland/events{/privacy}",
"received_events_url": "https://api.github.com/users/sanderland/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"Hi @sanderland ! Thanks for reporting :) This is a bug, I opened a PR to fix it. We'll do a new release soon\r\n\r\nIn the meantime you can fix it by specifying in advance that the \"label\" are integers:\r\n```python\r\nimport numpy as np\r\n\r\nds = Dataset.from_dict(\r\n {\r\n \"text\": [\"the lazy dog jumps over the quick fox\", \"another sentence\"],\r\n \"label\": [[], []],\r\n }\r\n)\r\n# explicitly say that the \"label\" type is int64, even though it contains only null values\r\nds = ds.cast_column(\"label\", Sequence(Value(\"int64\")))\r\n\r\ndef mapper(features):\r\n features['label'] = [\r\n [0,0,0] for l in features['label']\r\n ]\r\n return features\r\n\r\nds_mapped = ds.map(mapper,batched=True)\r\n```"
] | "2022-06-13T10:47:52" | "2022-06-14T13:34:14" | "2022-06-14T13:34:14" | CONTRIBUTOR | null | ## Describe the bug
Dataset.map throws pyarrow.lib.ArrowNotImplementedError: Unsupported cast from int64 to null using function cast_null when converting from a type of 'empty lists' to 'lists with some type'.
This appears to be due to the interaction of arrow internals and some assumptions made by datasets.
The bug appeared when binarizing some labels, and then adding a dataset which had all these labels absent (to force the model to not label such empty strings with anything).
Particularly the fact that this only happens in batched mode is strange.
## Steps to reproduce the bug
```python
import numpy as np
from datasets import Dataset

ds = Dataset.from_dict(
    {
        "text": ["the lazy dog jumps over the quick fox", "another sentence"],
        "label": [[], []],  # empty lists, so the column is inferred as null-typed
    }
)

def mapper(features):
    features['label'] = [
        [0, 0, 0] for l in features['label']
    ]
    return features

ds_mapped = ds.map(mapper, batched=True)  # raises ArrowNotImplementedError
```
## Expected results
Not crashing
## Actual results
```
../.venv/lib/python3.8/site-packages/datasets/arrow_dataset.py:2346: in map
return self._map_single(
../.venv/lib/python3.8/site-packages/datasets/arrow_dataset.py:532: in wrapper
out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
../.venv/lib/python3.8/site-packages/datasets/arrow_dataset.py:499: in wrapper
out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
../.venv/lib/python3.8/site-packages/datasets/fingerprint.py:458: in wrapper
out = func(self, *args, **kwargs)
../.venv/lib/python3.8/site-packages/datasets/arrow_dataset.py:2751: in _map_single
writer.write_batch(batch)
../.venv/lib/python3.8/site-packages/datasets/arrow_writer.py:503: in write_batch
arrays.append(pa.array(typed_sequence))
pyarrow/array.pxi:230: in pyarrow.lib.array
???
pyarrow/array.pxi:110: in pyarrow.lib._handle_arrow_array_protocol
???
../.venv/lib/python3.8/site-packages/datasets/arrow_writer.py:198: in __arrow_array__
out = cast_array_to_feature(out, type, allow_number_to_str=not self.trying_type)
../.venv/lib/python3.8/site-packages/datasets/table.py:1675: in wrapper
return func(array, *args, **kwargs)
../.venv/lib/python3.8/site-packages/datasets/table.py:1812: in cast_array_to_feature
casted_values = _c(array.values, feature.feature)
../.venv/lib/python3.8/site-packages/datasets/table.py:1675: in wrapper
return func(array, *args, **kwargs)
../.venv/lib/python3.8/site-packages/datasets/table.py:1843: in cast_array_to_feature
return array_cast(array, feature(), allow_number_to_str=allow_number_to_str)
../.venv/lib/python3.8/site-packages/datasets/table.py:1675: in wrapper
return func(array, *args, **kwargs)
../.venv/lib/python3.8/site-packages/datasets/table.py:1752: in array_cast
return array.cast(pa_type)
pyarrow/array.pxi:915: in pyarrow.lib.Array.cast
???
../.venv/lib/python3.8/site-packages/pyarrow/compute.py:376: in cast
return call_function("cast", [arr], options)
pyarrow/_compute.pyx:542: in pyarrow._compute.call_function
???
pyarrow/_compute.pyx:341: in pyarrow._compute.Function.call
???
pyarrow/error.pxi:144: in pyarrow.lib.pyarrow_internal_check_status
???
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
> ???
E pyarrow.lib.ArrowNotImplementedError: Unsupported cast from int64 to null using function cast_null
pyarrow/error.pxi:121: ArrowNotImplementedError
```
## Workarounds
* Not using batched=True
* Using an np.array([],dtype=float) or similar instead of [] in the input
* Naming the output column differently from the input column
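A minimal sketch of one more workaround, following the maintainer's suggestion in the comments: explicitly cast the empty column to an integer sequence before mapping (`Sequence` and `Value` are the feature types from `datasets`).
```python
from datasets import Dataset, Sequence, Value

ds = Dataset.from_dict(
    {
        "text": ["the lazy dog jumps over the quick fox", "another sentence"],
        "label": [[], []],
    }
)
# Declare the element type up front so the column is not inferred as null-typed.
ds = ds.cast_column("label", Sequence(Value("int64")))

def mapper(features):
    features["label"] = [[0, 0, 0] for _ in features["label"]]
    return features

ds_mapped = ds.map(mapper, batched=True)  # no longer raises
```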
## Environment info
- `datasets` version: 2.2.2
- Platform: Ubuntu
- Python version: 3.8
- PyArrow version: 8.0.0
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4483/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4483/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4481 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4481/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4481/comments | https://api.github.com/repos/huggingface/datasets/issues/4481/events | https://github.com/huggingface/datasets/pull/4481 | 1,269,187,792 | PR_kwDODunzps45jIRi | 4,481 | Fix iwslt2017 | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-13T09:51:21" | "2022-10-26T09:09:31" | "2022-06-13T10:40:18" | MEMBER | null | The files were moved to google drive, I hosted them on the Hub instead (ok according to the license)
I also updated the `datasets_infos.json` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4481/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4481/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4481",
"html_url": "https://github.com/huggingface/datasets/pull/4481",
"diff_url": "https://github.com/huggingface/datasets/pull/4481.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4481.patch",
"merged_at": "2022-06-13T10:40:18"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4480 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4480/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4480/comments | https://api.github.com/repos/huggingface/datasets/issues/4480/events | https://github.com/huggingface/datasets/issues/4480 | 1,268,921,567 | I_kwDODunzps5LojTf | 4,480 | Bigbench tensorflow GPU dependency | {
"login": "cceyda",
"id": 15624271,
"node_id": "MDQ6VXNlcjE1NjI0Mjcx",
"avatar_url": "https://avatars.githubusercontent.com/u/15624271?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/cceyda",
"html_url": "https://github.com/cceyda",
"followers_url": "https://api.github.com/users/cceyda/followers",
"following_url": "https://api.github.com/users/cceyda/following{/other_user}",
"gists_url": "https://api.github.com/users/cceyda/gists{/gist_id}",
"starred_url": "https://api.github.com/users/cceyda/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/cceyda/subscriptions",
"organizations_url": "https://api.github.com/users/cceyda/orgs",
"repos_url": "https://api.github.com/users/cceyda/repos",
"events_url": "https://api.github.com/users/cceyda/events{/privacy}",
"received_events_url": "https://api.github.com/users/cceyda/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | [
"Thanks for reporting ! :) cc @andersjohanandreassen can you take a look at this ?\r\n\r\nAlso @cceyda feel free to open an issue at [BIG-Bench](https://github.com/google/BIG-bench) as well regarding the `AttributeError`",
"I'm on vacation for the next week, so won't be able to do much debugging at the moment. Sorry for the inconvenience.\r\nBut I did quickly take a look:\r\n\r\n**pypi**:\r\nI managed to reproduce the above error with the pypi version begin out of date. \r\nThe version on `https://storage.googleapis.com/public_research_data/bigbench/bigbench-0.0.1.tar.gz` should be up to date, but it was my understanding that there was some issue with the pypi upload, so I don't even understand why there is a version [on pypi from April 1](https://pypi.org/project/bigbench/0.0.1/). Perhaps @ethansdyer, who's handling the pypi upload, knows the answer to that?\r\n\r\n**OOM error**:\r\nBut, I'm unable to reproduce the OOM error in a google colab with GPU enabled.\r\nThis is what I ran:\r\n```\r\n!pip install bigbench@https://storage.googleapis.com/public_research_data/bigbench/bigbench-0.0.1.tar.gz\r\n!pip install datasets\r\n\r\nfrom datasets import load_dataset\r\ndataset = load_dataset(\"bigbench\",\"swedish_to_german_proverbs\")\r\n``` \r\nThe `swedish_to_german_proverbs`task is only 72 examples, so I don't understand what could be causing the OOM error. Loading the task has no effect on the RAM for me. @cceyda Can you confirm that this does not occur in a [colab](https://colab.research.google.com/)?\r\nIf the GPU is somehow causing issues on your system, disabling the GPU from TF might be an option too\r\n```\r\nimport os\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\r\n```\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n",
"Solved.\r\nYes it works on colab, and somehow magically on my machine too now. hmm not sure what was wrong before I had used a fresh venv both times with just the dataloading code, and tried multiple times. (maybe just a wrong tensorflow version got mixed up somehow) The tensorflow call seems to come from the bigbench side anyway.\r\n\r\nabout bigbench pypi version update, I opened an issue over there https://github.com/google/BIG-bench/issues/846\r\n\r\nanyway closing this now. If anyone else has the same problem can re-open."
] | "2022-06-13T05:24:06" | "2022-06-14T19:45:24" | "2022-06-14T19:45:23" | CONTRIBUTOR | null | ## Describe the bug
Loading bigbench
```py
from datasets import load_dataset
dataset = load_dataset("bigbench","swedish_to_german_proverbs")
```
tries to use the GPU and fails with OOM, with the following error:
```
Downloading and preparing dataset bigbench/swedish_to_german_proverbs (download: Unknown size, generated: 68.92 KiB, post-processed: Unknown size, total: 68.92 KiB) to /home/ceyda/.cache/huggingface/datasets/bigbench/swedish_to_german_proverbs/1.0.0/7d2f6e537fa937dfaac8b1c1df782f2055071d3fd8e4f4ae93d28012a354ced0...
Generating default split: 0%| | 0/72 [00:00<?, ? examples/s]2022-06-13 14:11:04.154469: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2022-06-13 14:11:05.133600: F tensorflow/core/platform/statusor.cc:33] Attempting to fetch value instead of handling error INTERNAL: failed initializing StreamExecutor for CUDA device ordinal 3: INTERNAL: failed call to cuDevicePrimaryCtxRetain: CUDA_ERROR_OUT_OF_MEMORY: out of memory; total memory reported: 25396838400
Aborted (core dumped)
```
I think this is because the bigbench dependency (below) installs the GPU version of tensorflow, and data loading tries to use the GPU by default.
`pip install bigbench@https://storage.googleapis.com/public_research_data/bigbench/bigbench-0.0.1.tar.gz`
while just doing 'pip install bigbench' results in the following error
```
File "/home/ceyda/.local/lib/python3.7/site-packages/datasets/load.py", line 109, in import_main_class
module = importlib.import_module(module_path)
File "/usr/lib/python3.7/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1006, in _gcd_import
File "<frozen importlib._bootstrap>", line 983, in _find_and_load
File "<frozen importlib._bootstrap>", line 967, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 677, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 728, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/home/ceyda/.cache/huggingface/modules/datasets_modules/datasets/bigbench/7d2f6e537fa937dfaac8b1c1df782f2055071d3fd8e4f4ae93d28012a354ced0/bigbench.py", line 118, in <module>
class Bigbench(datasets.GeneratorBasedBuilder):
File "/home/ceyda/.cache/huggingface/modules/datasets_modules/datasets/bigbench/7d2f6e537fa937dfaac8b1c1df782f2055071d3fd8e4f4ae93d28012a354ced0/bigbench.py", line 127, in Bigbench
BigBenchConfig(name=name, version=datasets.Version("1.0.0")) for name in bb_utils.get_all_json_task_names()
AttributeError: module 'bigbench.api.util' has no attribute 'get_all_json_task_names'
```
## Steps to avoid the bug
Not ideal, but it can be worked around with the following (since I don't really use tensorflow elsewhere):
`pip uninstall tensorflow`
`pip install tensorflow-cpu`
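Alternatively, a sketch of the workaround suggested in the comments: hide the GPUs from TensorFlow before loading, so the installed GPU build stays on CPU.
```python
import os

# Make TensorFlow (pulled in as a bigbench dependency) ignore all GPUs.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

from datasets import load_dataset

dataset = load_dataset("bigbench", "swedish_to_german_proverbs")
```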
## Environment info
- datasets @ master
- Python version: 3.7
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4480/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4480/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4479 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4479/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4479/comments | https://api.github.com/repos/huggingface/datasets/issues/4479/events | https://github.com/huggingface/datasets/pull/4479 | 1,268,558,237 | PR_kwDODunzps45hHtZ | 4,479 | Include entity positions as feature in ReCoRD | {
"login": "richarddwang",
"id": 17963619,
"node_id": "MDQ6VXNlcjE3OTYzNjE5",
"avatar_url": "https://avatars.githubusercontent.com/u/17963619?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/richarddwang",
"html_url": "https://github.com/richarddwang",
"followers_url": "https://api.github.com/users/richarddwang/followers",
"following_url": "https://api.github.com/users/richarddwang/following{/other_user}",
"gists_url": "https://api.github.com/users/richarddwang/gists{/gist_id}",
"starred_url": "https://api.github.com/users/richarddwang/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/richarddwang/subscriptions",
"organizations_url": "https://api.github.com/users/richarddwang/orgs",
"repos_url": "https://api.github.com/users/richarddwang/repos",
"events_url": "https://api.github.com/users/richarddwang/events{/privacy}",
"received_events_url": "https://api.github.com/users/richarddwang/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-12T11:56:28" | "2022-08-19T23:23:02" | "2022-08-19T13:23:48" | CONTRIBUTOR | null | https://huggingface.co/datasets/super_glue/viewer/record/validation
TLDR: We need to record entity positions, which are included in the source data but excluded by the loading script, to enable efficient and effective training for ReCoRD.
Currently, the loading script ignores the entity positions ("entity_start", "entity_end") and only records the entity text. This might be because the training method of the official baseline is to make n training instances from a datapoint by replacing \"\@ placeholder\" in the query with each entity individually.
But this increases the already heavy computation several-fold. So DeBERTa uses a method that takes entity embeddings by their positions in the passage, and thus makes one training instance from one data point. It is far more efficient and has proved effective for the ReCoRD task.
Can anybody help me with the dataset card rendering error? Maybe @lhoestq ? | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4479/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4479/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4479",
"html_url": "https://github.com/huggingface/datasets/pull/4479",
"diff_url": "https://github.com/huggingface/datasets/pull/4479.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4479.patch",
"merged_at": "2022-08-19T13:23:48"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4477 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4477/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4477/comments | https://api.github.com/repos/huggingface/datasets/issues/4477/events | https://github.com/huggingface/datasets/issues/4477 | 1,268,308,986 | I_kwDODunzps5LmNv6 | 4,477 | Dataset Viewer issue for fgrezes/WIESP2022-NER | {
"login": "AshTayade",
"id": 42551754,
"node_id": "MDQ6VXNlcjQyNTUxNzU0",
"avatar_url": "https://avatars.githubusercontent.com/u/42551754?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/AshTayade",
"html_url": "https://github.com/AshTayade",
"followers_url": "https://api.github.com/users/AshTayade/followers",
"following_url": "https://api.github.com/users/AshTayade/following{/other_user}",
"gists_url": "https://api.github.com/users/AshTayade/gists{/gist_id}",
"starred_url": "https://api.github.com/users/AshTayade/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/AshTayade/subscriptions",
"organizations_url": "https://api.github.com/users/AshTayade/orgs",
"repos_url": "https://api.github.com/users/AshTayade/repos",
"events_url": "https://api.github.com/users/AshTayade/events{/privacy}",
"received_events_url": "https://api.github.com/users/AshTayade/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"https://huggingface.co/datasets/fgrezes/WIESP2022-NER\r\n\r\nThe error:\r\n\r\n```\r\nMessage: Couldn't find a dataset script at /src/services/worker/fgrezes/WIESP2022-NER/WIESP2022-NER.py or any data file in the same directory. Couldn't find 'fgrezes/WIESP2022-NER' on the Hugging Face Hub either: FileNotFoundError: Unable to resolve any data file that matches ['**test*', '**eval*'] in dataset repository fgrezes/WIESP2022-NER with any supported extension ['csv', 'tsv', 'json', 'jsonl', 'parquet', 'txt', 'blp', 'bmp', 'dib', 'bufr', 'cur', 'pcx', 'dcx', 'dds', 'ps', 'eps', 'fit', 'fits', 'fli', 'flc', 'ftc', 'ftu', 'gbr', 'gif', 'grib', 'h5', 'hdf', 'png', 'apng', 'jp2', 'j2k', 'jpc', 'jpf', 'jpx', 'j2c', 'icns', 'ico', 'im', 'iim', 'tif', 'tiff', 'jfif', 'jpe', 'jpg', 'jpeg', 'mpg', 'mpeg', 'msp', 'pcd', 'pxr', 'pbm', 'pgm', 'ppm', 'pnm', 'psd', 'bw', 'rgb', 'rgba', 'sgi', 'ras', 'tga', 'icb', 'vda', 'vst', 'webp', 'wmf', 'emf', 'xbm', 'xpm', 'zip']\r\n```\r\n\r\nI understand the issue is not related to the dataset viewer in itself, but with the autodetection of the data files without a loading script in the datasets library. cc @lhoestq @albertvillanova @mariosasko ",
"Apparently it finds `scoring-scripts/compute_seqeval.py` which matches `**eval*`, a regex that detects a test split. We should probably improve the regex because it's not supposed to catch this kind of files. It must also only check for files with supported extensions: txt, csv, png etc."
] | "2022-06-11T15:49:17" | "2022-07-18T13:07:33" | "2022-07-18T13:07:33" | NONE | null | ### Link
_No response_
### Description
_No response_
### Owner
_No response_ | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4477/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4477/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4476 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4476/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4476/comments | https://api.github.com/repos/huggingface/datasets/issues/4476/events | https://github.com/huggingface/datasets/issues/4476 | 1,267,987,499 | I_kwDODunzps5Lk_Qr | 4,476 | `to_pandas` doesn't take into account format. | {
"login": "Dref360",
"id": 8976546,
"node_id": "MDQ6VXNlcjg5NzY1NDY=",
"avatar_url": "https://avatars.githubusercontent.com/u/8976546?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Dref360",
"html_url": "https://github.com/Dref360",
"followers_url": "https://api.github.com/users/Dref360/followers",
"following_url": "https://api.github.com/users/Dref360/following{/other_user}",
"gists_url": "https://api.github.com/users/Dref360/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Dref360/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Dref360/subscriptions",
"organizations_url": "https://api.github.com/users/Dref360/orgs",
"repos_url": "https://api.github.com/users/Dref360/repos",
"events_url": "https://api.github.com/users/Dref360/events{/privacy}",
"received_events_url": "https://api.github.com/users/Dref360/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892871,
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement",
"name": "enhancement",
"color": "a2eeef",
"default": true,
"description": "New feature or request"
}
] | closed | false | null | [] | null | [
"Thanks for opening a discussion :)\r\n\r\nNote that you can use `.remove_columns(...)` to keep only the ones you're interested in before calling `.to_pandas()`",
"Yes I can do that thank you!\r\n\r\nDo you think that conceptually my example should work? If not, I'm happy to close this issue. \r\n\r\nIf yes, I can start working on it.",
"Hi! Instead of `with_format(columns=['a', 'b']).to_pandas()`, use `with_format(\"pandas\", columns=[\"a\", \"b\"])` for easy conversion of the parts of the dataset to pandas via indexing/slicing.\r\n\r\nThe full code:\r\n```python\r\nfrom datasets import Dataset\r\n\r\nds = Dataset.from_dict({'a': [1,2,3], 'b': [5,6,7], 'c': [8,9,10]})\r\npandas_df = ds.with_format(\"pandas\", columns=['a', 'b'])[:]\r\n```",
"Ahhhh Thank you!\r\n\r\nclosing then :)"
] | "2022-06-10T20:25:31" | "2022-06-15T17:41:41" | "2022-06-15T17:41:41" | CONTRIBUTOR | null | **Is your feature request related to a problem? Please describe.**
I have a large dataset that I need to convert part of to pandas to do some further analysis. Calling `to_pandas` directly on it is expensive. So I thought I could simply select the columns that I want and then call `to_pandas`.
**Describe the solution you'd like**
```python
from datasets import Dataset
ds = Dataset.from_dict({'a': [1,2,3], 'b': [5,6,7], 'c': [8,9,10]})
pandas_df = ds.with_format(columns=['a', 'b']).to_pandas()
# I would expect `pandas_df` to only include a and b as columns.
```
**Describe alternatives you've considered**
I could remove all columns that I don't want, but I don't know all of them in advance.
**Additional context**
I can probably make a PR with some pointers.
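For reference, a sketch of the pattern suggested in the comments below, which already works with the current API:
```python
from datasets import Dataset

ds = Dataset.from_dict({"a": [1, 2, 3], "b": [5, 6, 7], "c": [8, 9, 10]})
# Set the pandas format restricted to the wanted columns, then slice to get a DataFrame.
pandas_df = ds.with_format("pandas", columns=["a", "b"])[:]
```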
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4476/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4476/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4475 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4475/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4475/comments | https://api.github.com/repos/huggingface/datasets/issues/4475/events | https://github.com/huggingface/datasets/pull/4475 | 1,267,798,451 | PR_kwDODunzps45eufw | 4,475 | Improve error message for missing packages from inside dataset script | {
"login": "mariosasko",
"id": 47462742,
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariosasko",
"html_url": "https://github.com/mariosasko",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-10T16:59:36" | "2022-10-06T13:46:26" | "2022-06-13T13:16:43" | CONTRIBUTOR | null | Improve the error message for missing packages from inside a dataset script:
With this change, the error message for missing packages for `bigbench` looks as follows:
```
ImportError: To be able to use bigbench, you need to install the following dependencies:
- 'bigbench' using 'pip install "bigbench @ https://storage.googleapis.com/public_research_data/bigbench/bigbench-0.0.1.tar.gz"'
```
And this is how it looked before:
```
ImportError: To be able to use bigbench, you need to install the following dependencies['bigbench', 'bigbench', 'bigbench', 'bigbench'] using 'pip install "bigbench @ https://storage.googleapis.com/public_research_data/bigbench/bigbench-0.0.1.tar.gz" bigbench bigbench bigbench' for instance'
``` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4475/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4475/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4475",
"html_url": "https://github.com/huggingface/datasets/pull/4475",
"diff_url": "https://github.com/huggingface/datasets/pull/4475.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4475.patch",
"merged_at": null
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4474 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4474/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4474/comments | https://api.github.com/repos/huggingface/datasets/issues/4474/events | https://github.com/huggingface/datasets/pull/4474 | 1,267,767,541 | PR_kwDODunzps45en98 | 4,474 | [Docs] How to use with PyTorch page | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892861,
"node_id": "MDU6TGFiZWwxOTM1ODkyODYx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/documentation",
"name": "documentation",
"color": "0075ca",
"default": true,
"description": "Improvements or additions to documentation"
}
] | closed | false | null | [] | null | [] | "2022-06-10T16:25:49" | "2022-06-14T14:40:32" | "2022-06-14T14:04:33" | MEMBER | null | Currently the docs about PyTorch are scattered around different pages, and we were missing a place to explain more in depth how to use and optimize a dataset for PyTorch. This PR is related to #4457 which is the TF counterpart :)
cc @Rocketknight1 we can try to align both documentations contents now I think
cc @stevhliu let me know what you think ! | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4474/reactions",
"total_count": 1,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 1,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4474/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4474",
"html_url": "https://github.com/huggingface/datasets/pull/4474",
"diff_url": "https://github.com/huggingface/datasets/pull/4474.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4474.patch",
"merged_at": "2022-06-14T14:04:32"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4473 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4473/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4473/comments | https://api.github.com/repos/huggingface/datasets/issues/4473/events | https://github.com/huggingface/datasets/pull/4473 | 1,267,555,994 | PR_kwDODunzps45d5-R | 4,473 | Add SST-2 dataset | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-10T13:37:26" | "2022-06-13T14:11:34" | "2022-06-13T14:01:09" | MEMBER | null | Add SST-2 dataset.
Currently it is part of the GLUE benchmark.
This PR adds it as a standalone dataset.
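For context, a minimal usage sketch once the dataset is available (the `sst2` dataset id is an assumption; adjust it to the name it is published under):
```python
from datasets import load_dataset

sst2 = load_dataset("sst2")  # assumed dataset id
print(sst2["train"][0])
```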
CC: @julien-c | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4473/reactions",
"total_count": 1,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 1,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4473/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4473",
"html_url": "https://github.com/huggingface/datasets/pull/4473",
"diff_url": "https://github.com/huggingface/datasets/pull/4473.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4473.patch",
"merged_at": "2022-06-13T14:01:09"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4472 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4472/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4472/comments | https://api.github.com/repos/huggingface/datasets/issues/4472/events | https://github.com/huggingface/datasets/pull/4472 | 1,267,488,523 | PR_kwDODunzps45drcb | 4,472 | Fix 401 error for unauthenticated requests to non-existing repos | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-10T12:38:11" | "2022-06-10T13:05:11" | "2022-06-10T12:55:57" | MEMBER | null | The hub now returns 401 instead of 404 for unauthenticated requests to non-existing repos.
This PR adds support for the 401 error and fixes the CI failures on `master` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4472/reactions",
"total_count": 2,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 2,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4472/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4472",
"html_url": "https://github.com/huggingface/datasets/pull/4472",
"diff_url": "https://github.com/huggingface/datasets/pull/4472.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4472.patch",
"merged_at": "2022-06-10T12:55:56"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4471 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4471/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4471/comments | https://api.github.com/repos/huggingface/datasets/issues/4471/events | https://github.com/huggingface/datasets/issues/4471 | 1,267,475,268 | I_kwDODunzps5LjCNE | 4,471 | CI error with repo lhoestq/_dummy | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | [
"fixed by https://github.com/huggingface/datasets/pull/4472"
] | "2022-06-10T12:26:06" | "2022-06-10T13:24:53" | "2022-06-10T13:24:53" | MEMBER | null | ## Describe the bug
CI is failing because of repo "lhoestq/_dummy". See: https://app.circleci.com/pipelines/github/huggingface/datasets/12461/workflows/1b040b45-9578-4ab9-8c44-c643c4eb8691/jobs/74269
```
requests.exceptions.HTTPError: 401 Client Error: Unauthorized for url: https://huggingface.co/api/datasets/lhoestq/_dummy?full=true
```
The repo seems to no longer exist: https://huggingface.co/api/datasets/lhoestq/_dummy
```
error: "Repository not found"
```
CC: @lhoestq | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4471/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4471/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4470 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4470/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4470/comments | https://api.github.com/repos/huggingface/datasets/issues/4470/events | https://github.com/huggingface/datasets/pull/4470 | 1,267,470,051 | PR_kwDODunzps45dnYw | 4,470 | Reorder returned validation/test splits in script template | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-10T12:21:13" | "2022-06-10T18:04:10" | "2022-06-10T17:54:50" | MEMBER | null | null | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4470/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4470/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4470",
"html_url": "https://github.com/huggingface/datasets/pull/4470",
"diff_url": "https://github.com/huggingface/datasets/pull/4470.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4470.patch",
"merged_at": "2022-06-10T17:54:50"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4469 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4469/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4469/comments | https://api.github.com/repos/huggingface/datasets/issues/4469/events | https://github.com/huggingface/datasets/pull/4469 | 1,267,213,849 | PR_kwDODunzps45cweQ | 4,469 | Replace data URLs in wider_face dataset once hosted on the Hub | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-10T08:13:25" | "2022-06-10T16:42:08" | "2022-06-10T16:32:46" | MEMBER | null | This PR replaces the URLs of data files in Google Drive with our Hub ones, once the data owners have approved to host their data on the Hub.
They also informed us that their dataset is licensed under CC BY-NC-ND. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4469/reactions",
"total_count": 2,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 2,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4469/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4469",
"html_url": "https://github.com/huggingface/datasets/pull/4469",
"diff_url": "https://github.com/huggingface/datasets/pull/4469.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4469.patch",
"merged_at": "2022-06-10T16:32:46"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4468 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4468/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4468/comments | https://api.github.com/repos/huggingface/datasets/issues/4468/events | https://github.com/huggingface/datasets/pull/4468 | 1,266,715,742 | PR_kwDODunzps45bERK | 4,468 | Generalize tutorials for audio and vision | {
"login": "stevhliu",
"id": 59462357,
"node_id": "MDQ6VXNlcjU5NDYyMzU3",
"avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/stevhliu",
"html_url": "https://github.com/stevhliu",
"followers_url": "https://api.github.com/users/stevhliu/followers",
"following_url": "https://api.github.com/users/stevhliu/following{/other_user}",
"gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}",
"starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions",
"organizations_url": "https://api.github.com/users/stevhliu/orgs",
"repos_url": "https://api.github.com/users/stevhliu/repos",
"events_url": "https://api.github.com/users/stevhliu/events{/privacy}",
"received_events_url": "https://api.github.com/users/stevhliu/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892861,
"node_id": "MDU6TGFiZWwxOTM1ODkyODYx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/documentation",
"name": "documentation",
"color": "0075ca",
"default": true,
"description": "Improvements or additions to documentation"
}
] | closed | false | null | [] | null | [] | "2022-06-09T22:00:44" | "2022-06-14T16:22:02" | "2022-06-14T16:12:00" | MEMBER | null | This PR updates the tutorials to be more generalizable to all modalities. After reading the tutorials, a user should be able to load any type of dataset, know how to index into and slice a dataset, and do the most basic/common type of preprocessing (tokenization, resampling, applying transforms) depending on their dataset.
Other changes include:
- Removed the sections about a dataset's metadata, features, and columns because we cover this in an earlier tutorial about inspecting the `DatasetInfo` through the dataset builder.
- Separated the sharing dataset tutorial into two sections: (1) uploading via the web interface and (2) using the `huggingface_hub` library.
- Renamed some tutorials in the TOC to be more clear and specific.
- Added more text to nudge users towards joining the community and asking questions on the forums.
- If it's okay with everyone, I'd also like to remove the section about loading and using metrics since we have the `evaluate` docs now.
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4468/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4468/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4468",
"html_url": "https://github.com/huggingface/datasets/pull/4468",
"diff_url": "https://github.com/huggingface/datasets/pull/4468.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4468.patch",
"merged_at": "2022-06-14T16:12:00"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4467 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4467/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4467/comments | https://api.github.com/repos/huggingface/datasets/issues/4467/events | https://github.com/huggingface/datasets/issues/4467 | 1,266,218,358 | I_kwDODunzps5LePV2 | 4,467 | Transcript string 'null' converted to [None] by load_dataset() | {
"login": "mbarnig",
"id": 1360633,
"node_id": "MDQ6VXNlcjEzNjA2MzM=",
"avatar_url": "https://avatars.githubusercontent.com/u/1360633?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mbarnig",
"html_url": "https://github.com/mbarnig",
"followers_url": "https://api.github.com/users/mbarnig/followers",
"following_url": "https://api.github.com/users/mbarnig/following{/other_user}",
"gists_url": "https://api.github.com/users/mbarnig/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mbarnig/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mbarnig/subscriptions",
"organizations_url": "https://api.github.com/users/mbarnig/orgs",
"repos_url": "https://api.github.com/users/mbarnig/repos",
"events_url": "https://api.github.com/users/mbarnig/events{/privacy}",
"received_events_url": "https://api.github.com/users/mbarnig/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"Hi @mbarnig, thanks for reporting.\r\n\r\nPlease note that is an expected behavior by `pandas` (we use the `pandas` library to parse CSV files): https://pandas.pydata.org/docs/reference/api/pandas.read_csv.html\r\n```\r\nBy default the following values are interpreted as NaN: \r\nββ, β#N/Aβ, β#N/A N/Aβ, β#NAβ, β-1.#INDβ, β-1.#QNANβ, β-NaNβ, β-nanβ, β1.#INDβ, β1.#QNANβ, β<NA>β, βN/Aβ, βNAβ, βNULLβ, βNaNβ, βn/aβ, βnanβ, βnullβ.\r\n```\r\n(see \"null\" in the last position in the above list).\r\n\r\nIn order to prevent `pandas` from performing that automatic conversion from the string \"null\" to a NaN value, you should pass the `pandas` parameter `keep_default_na=False`:\r\n```python\r\nIn [2]: dataset = load_dataset('csv', data_files={'train': 'null-test.csv'}, keep_default_na=False)\r\nIn [3]: dataset[\"train\"][0][\"transcript\"]\r\nOut[3]: 'null'\r\n```",
"Thanks for the quick answer."
] | "2022-06-09T14:26:00" | "2022-06-09T17:55:37" | "2022-06-09T16:29:02" | NONE | null | ## Issue
I am training a Luxembourgish speech-recognition model in Colab with a custom dataset, including a dictionary of Luxembourgish words, for example the spoken numbers 0 to 9. When preparing the dataset with the script
`ds_train1 = mydataset.map(prepare_dataset)`
the following error was issued:
```
ValueError Traceback (most recent call last)
<ipython-input-69-1e8f2b37f5bc> in <module>()
----> 1 ds_train = mydataset_train.map(prepare_dataset)
11 frames
/usr/local/lib/python3.7/dist-packages/transformers/tokenization_utils_base.py in __call__(self, text, text_pair, add_special_tokens, padding, truncation, max_length, stride, is_split_into_words, pad_to_multiple_of, return_tensors, return_token_type_ids, return_attention_mask, return_overflowing_tokens, return_special_tokens_mask, return_offsets_mapping, return_length, verbose, **kwargs)
2450 if not _is_valid_text_input(text):
2451 raise ValueError(
-> 2452 "text input must of type str (single example), List[str] (batch or single pretokenized example) "
2453 "or List[List[str]] (batch of pretokenized examples)."
2454 )
ValueError: text input must of type str (single example), List[str] (batch or single pretokenized example) or List[List[str]] (batch of pretokenized examples).
```
Debugging this problem was not easy: all transcriptions in the dataset are valid strings. Finally I discovered that the transcript string 'null' is interpreted as [None] by `load_dataset()`. After deleting this row from the dataset, the training worked fine.
## Expected result
The transcript 'null' should be interpreted as the string 'null' instead of None.
## Reproduction
Here is the code to reproduce the error with a one-row dataset.
```
import csv

with open("null-test.csv") as f:
    reader = csv.reader(f)
    for row in reader:
        print(row)
```
['wav_filename', 'wav_filesize', 'transcript']
['wavs/female/NULL1.wav', '17530', 'null']
```
from datasets import load_dataset

dataset = load_dataset('csv', data_files={'train': 'null-test.csv'})
```
Using custom data configuration default-81ac0c0e27af3514
Downloading and preparing dataset csv/default to /root/.cache/huggingface/datasets/csv/default-81ac0c0e27af3514/0.0.0/433e0ccc46f9880962cc2b12065189766fbb2bee57a221866138fb9203c83519...
Downloading data files: 100%
1/1 [00:00<00:00, 29.55it/s]
Extracting data files: 100%
1/1 [00:00<00:00, 23.66it/s]
Dataset csv downloaded and prepared to /root/.cache/huggingface/datasets/csv/default-81ac0c0e27af3514/0.0.0/433e0ccc46f9880962cc2b12065189766fbb2bee57a221866138fb9203c83519. Subsequent calls will reuse this data.
100%
1/1 [00:00<00:00, 25.84it/s]
```
print(dataset['train']['transcript'])
```
[None]
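As suggested in the issue comments, a workaround is to keep `pandas` from treating the literal string "null" as a missing value by forwarding `keep_default_na=False` through `load_dataset` (the keyword is passed on to `pandas.read_csv` by the `csv` builder):
```
from datasets import load_dataset

# keep_default_na=False disables pandas' default NaN detection,
# so the transcript "null" stays a plain string
dataset = load_dataset('csv', data_files={'train': 'null-test.csv'}, keep_default_na=False)
print(dataset['train'][0]['transcript'])  # 'null'
```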
## Environment info
```
!pip install datasets==2.2.2
!pip install transformers==4.19.2
``` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4467/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4467/timeline | null | completed | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/4466 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4466/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4466/comments | https://api.github.com/repos/huggingface/datasets/issues/4466/events | https://github.com/huggingface/datasets/pull/4466 | 1,266,159,920 | PR_kwDODunzps45ZLsd | 4,466 | Optimize contiguous shard and select | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-09T13:45:39" | "2022-06-14T16:04:30" | "2022-06-14T15:54:45" | MEMBER | null | Currently `.shard()` and `.select()` always create an indices mapping. However if the requested data are contiguous, it's much more optimized to simply slice the Arrow table instead of building an indices mapping. In particular:
- the shard/select operation will be much faster
- reading speed will be much faster in the resulting dataset, since it won't have to do a lookup step in the indices mapping
Since `.shard()` is also used for `.map()` with `num_proc>1`, it will also significantly improve the reading speed of multiprocessed `.map()` operations.
Here is an example of the speed-up:
```python
>>> import io
>>> import numpy as np
>>> from datasets import Dataset
>>> ds = Dataset.from_dict({"a": np.random.rand(10_000_000)})
>>> shard = ds.shard(num_shards=4, index=0, contiguous=True) # this calls `.select(range(2_500_000))`
>>> buf = io.BytesIO()
>>> %time shard.to_json(buf)
Creating json from Arrow format: 100%|ββββββββββββββββββ| 100/100 [00:00<00:00, 376.17ba/s]
CPU times: user 258 ms, sys: 9.06 ms, total: 267 ms
Wall time: 266 ms
```
while previously it was
```python
Creating json from Arrow format: 100%|βββββββββββββββββββ| 100/100 [00:03<00:00, 29.41ba/s]
CPU times: user 3.33 s, sys: 69.1 ms, total: 3.39 s
Wall time: 3.4 s
```
In this simple case the speed-up is x10, but @sayakpaul experienced a x100 speed-up on their data when exporting to JSON.
## Implementation details
I mostly improved `.select()`: it now checks if the input corresponds to a contiguous chunk of data and then it slices the main Arrow table (or the indices mapping table if it exists). To check if the input indices are contiguous it checks two possibilities:
- if the indices are a `range` object, it checks that start >= 0 and step == 1
- otherwise in the general case, it iterates over the indices. If all the indices are contiguous then we're good, otherwise we have to build an indices mapping.
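For illustration, a minimal sketch of such a contiguity check could look like this (assumed names, not the exact code added in this PR):
```python
def _indices_are_contiguous(indices) -> bool:
    # Fast path: a `range` with step 1 and a non-negative start is contiguous
    if isinstance(indices, range):
        return indices.start >= 0 and indices.step == 1
    # General case: iterate and stop at the first index that breaks contiguity
    previous = None
    for i in indices:
        if i < 0 or (previous is not None and i != previous + 1):
            return False
        previous = i
    return True
```
If the check passes, the dataset can simply slice its Arrow table (e.g. with `table.slice(start, length)`) instead of writing an indices mapping.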
Having to iterate over the indices doesn't cause performance issues IMO because:
- either they are contiguous and in this case the cost of iterating over the indices is much less than the cost of creating an indices mapping
- or they are not contiguous, and then iterating generally stops quickly when it encounters the first index that is not contiguous. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4466/reactions",
"total_count": 1,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 1,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4466/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4466",
"html_url": "https://github.com/huggingface/datasets/pull/4466",
"diff_url": "https://github.com/huggingface/datasets/pull/4466.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4466.patch",
"merged_at": "2022-06-14T15:54:45"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4465 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4465/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4465/comments | https://api.github.com/repos/huggingface/datasets/issues/4465/events | https://github.com/huggingface/datasets/pull/4465 | 1,265,754,479 | PR_kwDODunzps45X0XY | 4,465 | Fix bigbench config names | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-09T08:06:19" | "2022-06-09T14:38:36" | "2022-06-09T14:29:19" | MEMBER | null | Fix https://github.com/huggingface/datasets/issues/4462 in the case of bigbench | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4465/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4465/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4465",
"html_url": "https://github.com/huggingface/datasets/pull/4465",
"diff_url": "https://github.com/huggingface/datasets/pull/4465.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4465.patch",
"merged_at": "2022-06-09T14:29:18"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4464 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4464/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4464/comments | https://api.github.com/repos/huggingface/datasets/issues/4464/events | https://github.com/huggingface/datasets/pull/4464 | 1,265,682,931 | PR_kwDODunzps45XlWW | 4,464 | Extend support for streaming datasets that use xml.dom.minidom.parse | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-09T06:58:25" | "2022-06-09T08:43:24" | "2022-06-09T08:34:16" | MEMBER | null | This PR extends support in streaming mode for datasets that use `xml.dom.minidom.parse` by patching that function.
This PR adds support for streaming datasets like "Yaxin/SemEval2015".
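As a rough, hypothetical sketch of the patching idea (illustrative names only; the actual patch relies on `datasets`' streaming utilities and may differ):
```python
import xml.dom.minidom

import fsspec

_original_parse = xml.dom.minidom.parse

def _patched_parse(file, *args, **kwargs):
    # For URL-like paths, open a (possibly remote) file object first,
    # so parsing also works when the dataset is streamed.
    if isinstance(file, str) and "://" in file:
        with fsspec.open(file, "rb") as f:
            return _original_parse(f, *args, **kwargs)
    return _original_parse(file, *args, **kwargs)

xml.dom.minidom.parse = _patched_parse
```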
Fix #4453. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4464/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4464/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4464",
"html_url": "https://github.com/huggingface/datasets/pull/4464",
"diff_url": "https://github.com/huggingface/datasets/pull/4464.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4464.patch",
"merged_at": "2022-06-09T08:34:15"
} | true |
https://api.github.com/repos/huggingface/datasets/issues/4463 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/4463/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/4463/comments | https://api.github.com/repos/huggingface/datasets/issues/4463/events | https://github.com/huggingface/datasets/pull/4463 | 1,265,093,211 | PR_kwDODunzps45Vnzu | 4,463 | Use config_id to check split sizes instead of config name | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | "2022-06-08T17:45:24" | "2022-06-09T08:15:43" | "2022-06-09T08:06:37" | MEMBER | null | Fix https://github.com/huggingface/datasets/issues/4462 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/4463/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/4463/timeline | null | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/4463",
"html_url": "https://github.com/huggingface/datasets/pull/4463",
"diff_url": "https://github.com/huggingface/datasets/pull/4463.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/4463.patch",
"merged_at": null
} | true |