url
stringlengths
58
61
repository_url
stringclasses
1 value
labels_url
stringlengths
72
75
comments_url
stringlengths
67
70
events_url
stringlengths
65
68
html_url
stringlengths
46
51
id
int64
599M
1.47B
node_id
stringlengths
18
32
number
int64
1
5.33k
title
stringlengths
1
276
user
dict
labels
list
state
stringclasses
2 values
locked
bool
1 class
assignee
dict
assignees
list
milestone
dict
comments
sequence
created_at
stringlengths
20
20
updated_at
stringlengths
20
20
closed_at
stringlengths
20
20
author_association
stringclasses
3 values
active_lock_reason
null
draft
bool
2 classes
pull_request
dict
body
stringlengths
0
228k
reactions
dict
timeline_url
stringlengths
67
70
performed_via_github_app
null
state_reason
stringclasses
3 values
is_pull_request
bool
2 classes
https://api.github.com/repos/huggingface/datasets/issues/5126
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5126/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5126/comments
https://api.github.com/repos/huggingface/datasets/issues/5126/events
https://github.com/huggingface/datasets/pull/5126
1,411,757,124
PR_kwDODunzps5A8Iw3
5,126
Fix class name of symbolic link
{ "avatar_url": "https://avatars.githubusercontent.com/u/9295277?v=4", "events_url": "https://api.github.com/users/riccardobucco/events{/privacy}", "followers_url": "https://api.github.com/users/riccardobucco/followers", "following_url": "https://api.github.com/users/riccardobucco/following{/other_user}", "gists_url": "https://api.github.com/users/riccardobucco/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/riccardobucco", "id": 9295277, "login": "riccardobucco", "node_id": "MDQ6VXNlcjkyOTUyNzc=", "organizations_url": "https://api.github.com/users/riccardobucco/orgs", "received_events_url": "https://api.github.com/users/riccardobucco/received_events", "repos_url": "https://api.github.com/users/riccardobucco/repos", "site_admin": false, "starred_url": "https://api.github.com/users/riccardobucco/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/riccardobucco/subscriptions", "type": "User", "url": "https://api.github.com/users/riccardobucco" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_5126). All of your documentation changes will be reflected on that endpoint.", "I have removed the reference to the Issue in the PR title, so that we avoid to have both references (to the issue and to the PR) in the merge commit to the main branch.\r\n\r\nInstead, it should be commented in the PR description, so that the PR is appropriately linked by GitHub to its corresponding Issue:\r\n\r\n> Fix #5098.", "@albertvillanova What should I test in your opinion? Also, where should I save the test file and how should I name it? Thanks for your support", "The regression test to be implemented should test what your PR fixes: that is, that `_resolve_single_pattern_locally` function does not resolve any symbolic link when passed a directory that does contain any.\r\n\r\nAs you are testing a function in `data_files.py`, the corresponding test should be in `tests/test_data_files.py`.\r\n\r\nYou could name the test something lilke: `test_resolve_single_pattern_locally_does_not_resolve_symbolic_links`\r\n\r\nYou could take inspiration from other tests there in that file." ]
2022-10-17T15:11:02Z
2022-11-14T14:40:18Z
2022-11-14T14:40:18Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5126.diff", "html_url": "https://github.com/huggingface/datasets/pull/5126", "merged_at": "2022-11-14T14:40:18Z", "patch_url": "https://github.com/huggingface/datasets/pull/5126.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5126" }
Fix #5098
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5126/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5126/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5125
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5125/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5125/comments
https://api.github.com/repos/huggingface/datasets/issues/5125/events
https://github.com/huggingface/datasets/pull/5125
1,411,602,813
PR_kwDODunzps5A7nr8
5,125
Add `pyproject.toml` for `black`
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-17T13:38:47Z
2022-10-17T14:23:27Z
2022-10-17T14:21:09Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5125.diff", "html_url": "https://github.com/huggingface/datasets/pull/5125", "merged_at": "2022-10-17T14:21:09Z", "patch_url": "https://github.com/huggingface/datasets/pull/5125.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5125" }
Add `pyproject.toml` as a config file for the `black` tool to support VS Code's auto-formatting on save (and to be more consistent with the other HF projects).
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5125/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5125/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5124
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5124/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5124/comments
https://api.github.com/repos/huggingface/datasets/issues/5124/events
https://github.com/huggingface/datasets/pull/5124
1,411,159,725
PR_kwDODunzps5A6HeL
5,124
Install tensorflow-macos dependency conditionally
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-17T08:45:08Z
2022-10-19T09:12:17Z
2022-10-19T09:10:06Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5124.diff", "html_url": "https://github.com/huggingface/datasets/pull/5124", "merged_at": "2022-10-19T09:10:06Z", "patch_url": "https://github.com/huggingface/datasets/pull/5124.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5124" }
Fix #5118.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5124/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5124/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5123
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5123/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5123/comments
https://api.github.com/repos/huggingface/datasets/issues/5123/events
https://github.com/huggingface/datasets/issues/5123
1,410,828,756
I_kwDODunzps5UF4nU
5,123
datasets freezes with streaming mode in multiple-gpu
{ "avatar_url": "https://avatars.githubusercontent.com/u/59409879?v=4", "events_url": "https://api.github.com/users/jackfeinmann5/events{/privacy}", "followers_url": "https://api.github.com/users/jackfeinmann5/followers", "following_url": "https://api.github.com/users/jackfeinmann5/following{/other_user}", "gists_url": "https://api.github.com/users/jackfeinmann5/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jackfeinmann5", "id": 59409879, "login": "jackfeinmann5", "node_id": "MDQ6VXNlcjU5NDA5ODc5", "organizations_url": "https://api.github.com/users/jackfeinmann5/orgs", "received_events_url": "https://api.github.com/users/jackfeinmann5/received_events", "repos_url": "https://api.github.com/users/jackfeinmann5/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jackfeinmann5/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jackfeinmann5/subscriptions", "type": "User", "url": "https://api.github.com/users/jackfeinmann5" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
null
[ "@lhoestq I tested the script without accelerator, and I confirm this is due to datasets part as this gets similar results without accelerator.", "Hi ! You said it works on 1 GPU but doesn't wortk without accelerator - what's the difference between running on 1 GPU and running without accelerator in your case ?", "Hi @lhoestq \r\nthanks for coming back to me. Sorry for the confusion I made. I meant this works fine on 1 GPU, but on multi-gpu it is freezing. \"accelerator\" is not an issue as if you adapt the code without accelerator this still gets the same issue.\r\nIn order to test it. Please run \"accelerate config\", then use the setup for multi-gpu in one node.\r\nAfter that run \"accelerate launch code.py\" and then you would see the freezing occurs.", "Hi @lhoestq \r\ncould you have the chance to reproduce the error by running the minimal example shared?\r\nthanks", "I think you need to do `train_dataset = train_dataset.with_format(\"torch\")` to work with the DataLoader in a multiprocessing setup :)\r\n\r\nThe hang is probably caused by our streamign lib `fsspec` which doesn't work in multiprocessing out of the box - but we made it work with the PyTorch DataLoader when the dataset format is set to \"torch\"", "Hi @lhoestq \r\nthanks for the response. I added the line suggested right before calling `with accelerator.main_process_first():` in the code above and I confirm this also freezes. to reproduce it please run \"accelerate launch code.py\". I was wondering if you could have more suggestions for me? I do not have an idea how to fix this or debug this freezing. many thanks.", "Maybe the `fsspec` stuff need to be clearer even before - can you try to run this function at the very beginning of your script ?\r\n```python\r\nimport fsspec\r\n\r\ndef _set_fsspec_for_multiprocess() -> None:\r\n \"\"\"\r\n Clear reference to the loop and thread.\r\n This is necessary otherwise HTTPFileSystem hangs in the ML training loop.\r\n Only required for fsspec >= 0.9.0\r\n See https://github.com/fsspec/gcsfs/issues/379\r\n \"\"\"\r\n fsspec.asyn.iothread[0] = None\r\n fsspec.asyn.loop[0] = None\r\n\r\n_set_fsspec_for_multiprocess()\r\n```", "Hi @lhoestq \r\nthank you. I tried it, I am getting `AttributeError: module 'fsspec' has no attribute 'asyn'`. which version of fsspect do you use?\r\nI am using \r\n```fsspec 2022.8.2 pypi_0 pypi```\r\nthank you.", "Hi @lhoestq \r\nI solved `fsspec` error with this hack for now https://discuss.huggingface.co/t/attributeerror-module-fsspec-has-no-attribute-asyn/19255 but this is still freezing, I greatly appreciate if you could run this script on your side. Many thanks.\r\n\r\n```\r\nimport fsspec\r\n\r\ndef _set_fsspec_for_multiprocess() -> None:\r\n \"\"\"\r\n Clear reference to the loop and thread.\r\n This is necessary otherwise HTTPFileSystem hangs in the ML training loop.\r\n Only required for fsspec >= 0.9.0\r\n See https://github.com/fsspec/gcsfs/issues/379\r\n \"\"\"\r\n fsspec.asyn.iothread[0] = None\r\n fsspec.asyn.loop[0] = None\r\n\r\n\r\n_set_fsspec_for_multiprocess()\r\n\r\nfrom accelerate import Accelerator\r\nfrom accelerate.logging import get_logger\r\nfrom datasets import load_dataset\r\nfrom torch.utils.data.dataloader import DataLoader\r\nimport torch\r\nfrom datasets import load_dataset\r\nfrom transformers import AutoTokenizer\r\nimport torch\r\nfrom accelerate.logging import get_logger\r\nfrom torch.utils.data import IterableDataset\r\nfrom torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe\r\n\r\n\r\nlogger = get_logger(__name__)\r\n\r\n\r\nclass ConstantLengthDataset(IterableDataset):\r\n \"\"\"\r\n Iterable dataset that returns constant length chunks of tokens from stream of text files.\r\n Args:\r\n tokenizer (Tokenizer): The processor used for proccessing the data.\r\n dataset (dataset.Dataset): Dataset with text files.\r\n infinite (bool): If True the iterator is reset after dataset reaches end else stops.\r\n max_seq_length (int): Length of token sequences to return.\r\n num_of_sequences (int): Number of token sequences to keep in buffer.\r\n chars_per_token (int): Number of characters per token used to estimate number of tokens in text buffer.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n tokenizer,\r\n dataset,\r\n infinite=False,\r\n max_seq_length=1024,\r\n num_of_sequences=1024,\r\n chars_per_token=3.6,\r\n ):\r\n self.tokenizer = tokenizer\r\n # self.concat_token_id = tokenizer.bos_token_id\r\n self.dataset = dataset\r\n self.max_seq_length = max_seq_length\r\n self.epoch = 0\r\n self.infinite = infinite\r\n self.current_size = 0\r\n self.max_buffer_size = max_seq_length * chars_per_token * num_of_sequences\r\n self.content_field = \"text\"\r\n\r\n def __iter__(self):\r\n iterator = iter(self.dataset)\r\n more_examples = True\r\n while more_examples:\r\n buffer, buffer_len = [], 0\r\n while True:\r\n if buffer_len >= self.max_buffer_size:\r\n break\r\n try:\r\n buffer.append(next(iterator)[self.content_field])\r\n buffer_len += len(buffer[-1])\r\n except StopIteration:\r\n if self.infinite:\r\n iterator = iter(self.dataset)\r\n self.epoch += 1\r\n logger.info(f\"Dataset epoch: {self.epoch}\")\r\n else:\r\n more_examples = False\r\n break\r\n tokenized_inputs = self.tokenizer(buffer, truncation=False)[\"input_ids\"]\r\n all_token_ids = []\r\n for tokenized_input in tokenized_inputs:\r\n all_token_ids.extend(tokenized_input)\r\n for i in range(0, len(all_token_ids), self.max_seq_length):\r\n input_ids = all_token_ids[i : i + self.max_seq_length]\r\n if len(input_ids) == self.max_seq_length:\r\n self.current_size += 1\r\n yield torch.tensor(input_ids)\r\n\r\n def shuffle(self, buffer_size=1000):\r\n return ShufflerIterDataPipe(self, buffer_size=buffer_size)\r\n\r\n\r\ndef create_dataloaders(tokenizer, accelerator):\r\n ds_kwargs = {\"streaming\": True}\r\n # In distributed training, the load_dataset function gaurantees that only one process\r\n # can concurrently download the dataset.\r\n datasets = load_dataset(\r\n \"c4\",\r\n \"en\",\r\n cache_dir=\"cache_dir\",\r\n **ds_kwargs,\r\n )\r\n train_data, valid_data = datasets[\"train\"], datasets[\"validation\"]\r\n with accelerator.main_process_first():\r\n train_data = train_data.shuffle(buffer_size=10000, seed=None)\r\n train_dataset = ConstantLengthDataset(\r\n tokenizer,\r\n train_data,\r\n infinite=True,\r\n max_seq_length=256,\r\n )\r\n valid_dataset = ConstantLengthDataset(\r\n tokenizer,\r\n valid_data,\r\n infinite=False,\r\n max_seq_length=256,\r\n )\r\n train_dataset = train_dataset.shuffle(buffer_size=10000)\r\n train_dataloader = DataLoader(train_dataset, batch_size=160, shuffle=True)\r\n eval_dataloader = DataLoader(valid_dataset, batch_size=160)\r\n return train_dataloader, eval_dataloader\r\n\r\n\r\ndef main():\r\n # Accelerator.\r\n logging_dir = \"data_save_dir/log\"\r\n accelerator = Accelerator(\r\n gradient_accumulation_steps=1,\r\n mixed_precision=\"bf16\",\r\n log_with=\"tensorboard\",\r\n logging_dir=logging_dir,\r\n )\r\n # We need to initialize the trackers we use, and also store our configuration.\r\n # The trackers initializes automatically on the main process.\r\n if accelerator.is_main_process:\r\n accelerator.init_trackers(\"test\")\r\n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-uncased\")\r\n\r\n # Load datasets and create dataloaders.\r\n train_dataloader, _ = create_dataloaders(tokenizer, accelerator)\r\n\r\n train_dataloader = accelerator.prepare(train_dataloader)\r\n for step, batch in enumerate(train_dataloader, start=1):\r\n print(step)\r\n accelerator.end_training()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n```" ]
2022-10-17T03:28:16Z
2022-10-24T16:13:43Z
null
NONE
null
null
null
## Describe the bug Hi. I am using this dataloader, which is for processing large datasets in streaming mode mentioned in one of examples of huggingface. I am using it to read c4: https://github.com/huggingface/transformers/blob/b48ac1a094e572d6076b46a9e4ed3e0ebe978afc/examples/research_projects/codeparrot/scripts/codeparrot_training.py#L22 During using multi-gpu in accelerator in one node, the code freezes, but works for 1 GPU: ``` 10/16/2022 14:18:46 - INFO - datasets.info - Loading Dataset Infos from /home/jack/.cache/huggingface/modules/datasets_modules/datasets/c4/df532b158939272d032cc63ef19cd5b83e9b4d00c922b833e4cb18b2e9869b01 Steps: 0%| | 0/400000 [00:00<?, ?it/s]10/16/2022 14:18:47 - INFO - torch.utils.data.dataloader - Shared seed (135290893754684706) sent to store on rank 0 ``` # Code to reproduce please run this code with `accelerate launch code.py` ``` from accelerate import Accelerator from accelerate.logging import get_logger from datasets import load_dataset from torch.utils.data.dataloader import DataLoader import torch from datasets import load_dataset from transformers import AutoTokenizer import torch from accelerate.logging import get_logger from torch.utils.data import IterableDataset from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe logger = get_logger(__name__) class ConstantLengthDataset(IterableDataset): """ Iterable dataset that returns constant length chunks of tokens from stream of text files. Args: tokenizer (Tokenizer): The processor used for proccessing the data. dataset (dataset.Dataset): Dataset with text files. infinite (bool): If True the iterator is reset after dataset reaches end else stops. max_seq_length (int): Length of token sequences to return. num_of_sequences (int): Number of token sequences to keep in buffer. chars_per_token (int): Number of characters per token used to estimate number of tokens in text buffer. """ def __init__( self, tokenizer, dataset, infinite=False, max_seq_length=1024, num_of_sequences=1024, chars_per_token=3.6, ): self.tokenizer = tokenizer # self.concat_token_id = tokenizer.bos_token_id self.dataset = dataset self.max_seq_length = max_seq_length self.epoch = 0 self.infinite = infinite self.current_size = 0 self.max_buffer_size = max_seq_length * chars_per_token * num_of_sequences self.content_field = "text" def __iter__(self): iterator = iter(self.dataset) more_examples = True while more_examples: buffer, buffer_len = [], 0 while True: if buffer_len >= self.max_buffer_size: break try: buffer.append(next(iterator)[self.content_field]) buffer_len += len(buffer[-1]) except StopIteration: if self.infinite: iterator = iter(self.dataset) self.epoch += 1 logger.info(f"Dataset epoch: {self.epoch}") else: more_examples = False break tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"] all_token_ids = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input) for i in range(0, len(all_token_ids), self.max_seq_length): input_ids = all_token_ids[i : i + self.max_seq_length] if len(input_ids) == self.max_seq_length: self.current_size += 1 yield torch.tensor(input_ids) def shuffle(self, buffer_size=1000): return ShufflerIterDataPipe(self, buffer_size=buffer_size) def create_dataloaders(tokenizer, accelerator): ds_kwargs = {"streaming": True} # In distributed training, the load_dataset function gaurantees that only one process # can concurrently download the dataset. datasets = load_dataset( "c4", "en", cache_dir="cache_dir", **ds_kwargs, ) train_data, valid_data = datasets["train"], datasets["validation"] with accelerator.main_process_first(): train_data = train_data.shuffle(buffer_size=10000, seed=None) train_dataset = ConstantLengthDataset( tokenizer, train_data, infinite=True, max_seq_length=256, ) valid_dataset = ConstantLengthDataset( tokenizer, valid_data, infinite=False, max_seq_length=256, ) train_dataset = train_dataset.shuffle(buffer_size=10000) train_dataloader = DataLoader(train_dataset, batch_size=160, shuffle=True) eval_dataloader = DataLoader(valid_dataset, batch_size=160) return train_dataloader, eval_dataloader def main(): # Accelerator. logging_dir = "data_save_dir/log" accelerator = Accelerator( gradient_accumulation_steps=1, mixed_precision="bf16", log_with="tensorboard", logging_dir=logging_dir, ) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("test") tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") # Load datasets and create dataloaders. train_dataloader, _ = create_dataloaders(tokenizer, accelerator) train_dataloader = accelerator.prepare(train_dataloader) for step, batch in enumerate(train_dataloader, start=1): print(step) accelerator.end_training() if __name__ == "__main__": main() ``` ## Results expected Being able to run the code for streamining datasets with multi-gpu ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.5.2 - Platform: linux - Python version: 3.9.12 - PyArrow version: 9.0.0 @lhoestq I do not have any idea why this freezing happens, and I removed the streaming mode and this was working fine, so I know this is caused by streaming mode of the dataloader part not working well with multi-gpu setting. Since datasets are large, I hope to keep the streamining mode. I very much appreciate your help.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5123/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5123/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5122
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5122/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5122/comments
https://api.github.com/repos/huggingface/datasets/issues/5122/events
https://github.com/huggingface/datasets/pull/5122
1,410,732,403
PR_kwDODunzps5A4rWn
5,122
Add warning
{ "avatar_url": "https://avatars.githubusercontent.com/u/34204311?v=4", "events_url": "https://api.github.com/users/Salehbigdeli/events{/privacy}", "followers_url": "https://api.github.com/users/Salehbigdeli/followers", "following_url": "https://api.github.com/users/Salehbigdeli/following{/other_user}", "gists_url": "https://api.github.com/users/Salehbigdeli/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Salehbigdeli", "id": 34204311, "login": "Salehbigdeli", "node_id": "MDQ6VXNlcjM0MjA0MzEx", "organizations_url": "https://api.github.com/users/Salehbigdeli/orgs", "received_events_url": "https://api.github.com/users/Salehbigdeli/received_events", "repos_url": "https://api.github.com/users/Salehbigdeli/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Salehbigdeli/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Salehbigdeli/subscriptions", "type": "User", "url": "https://api.github.com/users/Salehbigdeli" }
[]
closed
false
null
[]
null
[ "As mentioned in https://github.com/huggingface/datasets/issues/5105 I think we just need to keep the existing files instead of deleting them.\r\nThe `dataset_info.json` file contains the split names anyway, so we know which files belong to the dataset, and which ones don't." ]
2022-10-17T01:30:37Z
2022-11-05T12:23:53Z
2022-11-05T12:23:53Z
NONE
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5122.diff", "html_url": "https://github.com/huggingface/datasets/pull/5122", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/5122.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5122" }
Fixes: #5105 I think removing the directory with warning is a better solution for this issue. Because if we decide to keep existing files in directory, then we should deal with the case providing same directory for several datasets! Which we know is not possible since `dataset_info.json` exists in that directory.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5122/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5122/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5121
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5121/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5121/comments
https://api.github.com/repos/huggingface/datasets/issues/5121/events
https://github.com/huggingface/datasets/pull/5121
1,410,681,067
PR_kwDODunzps5A4gUB
5,121
Bugfix ignore function when creating new_fingerprint for caching
{ "avatar_url": "https://avatars.githubusercontent.com/u/34204311?v=4", "events_url": "https://api.github.com/users/Salehbigdeli/events{/privacy}", "followers_url": "https://api.github.com/users/Salehbigdeli/followers", "following_url": "https://api.github.com/users/Salehbigdeli/following{/other_user}", "gists_url": "https://api.github.com/users/Salehbigdeli/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Salehbigdeli", "id": 34204311, "login": "Salehbigdeli", "node_id": "MDQ6VXNlcjM0MjA0MzEx", "organizations_url": "https://api.github.com/users/Salehbigdeli/orgs", "received_events_url": "https://api.github.com/users/Salehbigdeli/received_events", "repos_url": "https://api.github.com/users/Salehbigdeli/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Salehbigdeli/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Salehbigdeli/subscriptions", "type": "User", "url": "https://api.github.com/users/Salehbigdeli" }
[]
closed
false
null
[]
null
[ "Adding \"function\" to the kwargs to ignore when computing the fingerprint will break `map` caching. Indeed passing two different function would result in two different datasets that have the same fingerprint - and the cache wouldn't be able to distinguish them.\r\n\r\nE.g this code would reload ds1 from the cache insetad of computing the dataset for ds2\r\n```python\r\nds = Dataset.from_dict({\"a\": [1, 2, 3]})\r\nds1 = ds.map(lambda x: {\"b\": 1})\r\nds2 = ds.map(lambda x: {\"b\": 2})\r\n```" ]
2022-10-17T00:03:43Z
2022-10-17T12:39:36Z
2022-10-17T12:39:36Z
NONE
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5121.diff", "html_url": "https://github.com/huggingface/datasets/pull/5121", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/5121.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5121" }
maybe fixes: #5109
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5121/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5121/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5120
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5120/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5120/comments
https://api.github.com/repos/huggingface/datasets/issues/5120/events
https://github.com/huggingface/datasets/pull/5120
1,410,641,221
PR_kwDODunzps5A4X10
5,120
Fix `tqdm` zip bug
{ "avatar_url": "https://avatars.githubusercontent.com/u/9879252?v=4", "events_url": "https://api.github.com/users/david1542/events{/privacy}", "followers_url": "https://api.github.com/users/david1542/followers", "following_url": "https://api.github.com/users/david1542/following{/other_user}", "gists_url": "https://api.github.com/users/david1542/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/david1542", "id": 9879252, "login": "david1542", "node_id": "MDQ6VXNlcjk4NzkyNTI=", "organizations_url": "https://api.github.com/users/david1542/orgs", "received_events_url": "https://api.github.com/users/david1542/received_events", "repos_url": "https://api.github.com/users/david1542/repos", "site_admin": false, "starred_url": "https://api.github.com/users/david1542/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/david1542/subscriptions", "type": "User", "url": "https://api.github.com/users/david1542" }
[]
closed
false
null
[]
null
[ "@albertvillanova Thanks for your comment. What do you think about creating 2 `pbar` for each case? I see the `pbar_iterable` is initialized differently. Maybe `pbar` can also be initialized like that.", "@albertvillanova Another solution I implemented is to change `pbar_iterable` and add the `zip` to it. I updated the PR with this solution. Let me know what you think.", "_The documentation is not available anymore as the PR was closed or merged._", "@albertvillanova Done :) Let me know what you think.", "@albertvillanova Thanks :) I also don't see an easy way to test this. This was just a problem in the way `tqdm` was used. I'm not sure we should cover it in tests.", "Hi, \r\n\r\nFirst of all, thanks for this PR. \r\nIt's the first time I join a discussion on GitHUB on problem resolution in libraries such as transformers, so I hope I comply to the best practices for an efficient communication...\r\n\r\nI am running `AutoTokenizer.from_pretrained` in a Google Colab notebook for using with BERT base. \r\nI am experiencing issue [5117](https://github.com/huggingface/datasets/issues/5117).\r\n\r\nEach time I run my notebook, I do:\r\n\r\n`! pip install transformers \r\n! pip install datasets \r\n! pip install huggingface_hub`\r\n\r\nAs I understand, the issue has been resolved and the solution merged to the released version of the code?\r\nSo I expect that the bug is resolved in my notebook, however this is not the case.\r\n\r\nDo I get something wrong? \r\nDo I have to implement some change in the source code myself?\r\n\r\nThanks in advance for your help!", "@Cochonaki Hi :) The problem was fixed but there wasn't a release since then. I believe a new release should come out in the upcoming weeks. Maybe someone from the core maintainers can answer that :)\r\n\r\ncc: @albertvillanova ", "Baby Haiti Coffee SE is born\n\nNH watch\n\nOn Sun, Oct 23, 2022 at 02:39 Dudu Lasry ***@***.***> wrote:\n\n> @Cochonaki <https://github.com/Cochonaki> Hi :) The problem was fixed but\n> there wasn't a release since then. I believe a new release should come out\n> in the upcoming weeks. Maybe someone from the core maintainers can answer\n> that :)\n>\n> cc: @albertvillanova <https://github.com/albertvillanova>\n>\n> —\n> Reply to this email directly, view it on GitHub\n> <https://github.com/huggingface/datasets/pull/5120#issuecomment-1288024546>,\n> or unsubscribe\n> <https://github.com/notifications/unsubscribe-auth/AAB4E2NCT7QO7W3PTQGDIKDWETMQ7ANCNFSM6AAAAAARGRBY2M>\n> .\n> You are receiving this because you are subscribed to this thread.Message\n> ID: ***@***.***>\n>\n", "Hi, @Cochonaki.\r\n\r\nAs @david1542 pointed out, we have not made a release since this bug was fixed. We will make one in the following weeks.\r\n\r\nIn the meantime, if you would like to incorporate the bug fix, you can install `datasets` from this repo main branch:\r\n```shell\r\npip install git+https://github.com/huggingface/datasets#egg=datasets\r\n```", "Thanks a lot @albertvillanova and @david1542, it works now!\r\nI am really thankful for your help, that encourages me to participate more in this community.\r\nSee you around!", "Welcome!!! 🤗" ]
2022-10-16T22:19:18Z
2022-10-23T10:27:53Z
2022-10-19T08:53:17Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5120.diff", "html_url": "https://github.com/huggingface/datasets/pull/5120", "merged_at": "2022-10-19T08:53:17Z", "patch_url": "https://github.com/huggingface/datasets/pull/5120.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5120" }
This PR solves #5117, by wrapping the entire `zip` clause in tqdm. For more information, please checkout this Stack Overflow thread: https://stackoverflow.com/questions/41171191/tqdm-progressbar-and-zip-built-in-do-not-work-together
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5120/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5120/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5119
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5119/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5119/comments
https://api.github.com/repos/huggingface/datasets/issues/5119/events
https://github.com/huggingface/datasets/pull/5119
1,410,561,363
PR_kwDODunzps5A4IQp
5,119
[TYPO] Update new_dataset_script.py
{ "avatar_url": "https://avatars.githubusercontent.com/u/3664563?v=4", "events_url": "https://api.github.com/users/cakiki/events{/privacy}", "followers_url": "https://api.github.com/users/cakiki/followers", "following_url": "https://api.github.com/users/cakiki/following{/other_user}", "gists_url": "https://api.github.com/users/cakiki/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/cakiki", "id": 3664563, "login": "cakiki", "node_id": "MDQ6VXNlcjM2NjQ1NjM=", "organizations_url": "https://api.github.com/users/cakiki/orgs", "received_events_url": "https://api.github.com/users/cakiki/received_events", "repos_url": "https://api.github.com/users/cakiki/repos", "site_admin": false, "starred_url": "https://api.github.com/users/cakiki/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cakiki/subscriptions", "type": "User", "url": "https://api.github.com/users/cakiki" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-16T17:36:49Z
2022-10-19T09:48:19Z
2022-10-19T09:45:59Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5119.diff", "html_url": "https://github.com/huggingface/datasets/pull/5119", "merged_at": "2022-10-19T09:45:59Z", "patch_url": "https://github.com/huggingface/datasets/pull/5119.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5119" }
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5119/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5119/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5118
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5118/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5118/comments
https://api.github.com/repos/huggingface/datasets/issues/5118/events
https://github.com/huggingface/datasets/issues/5118
1,410,547,373
I_kwDODunzps5UEz6t
5,118
Installing `datasets` on M1 computers
{ "avatar_url": "https://avatars.githubusercontent.com/u/9879252?v=4", "events_url": "https://api.github.com/users/david1542/events{/privacy}", "followers_url": "https://api.github.com/users/david1542/followers", "following_url": "https://api.github.com/users/david1542/following{/other_user}", "gists_url": "https://api.github.com/users/david1542/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/david1542", "id": 9879252, "login": "david1542", "node_id": "MDQ6VXNlcjk4NzkyNTI=", "organizations_url": "https://api.github.com/users/david1542/orgs", "received_events_url": "https://api.github.com/users/david1542/received_events", "repos_url": "https://api.github.com/users/david1542/repos", "site_admin": false, "starred_url": "https://api.github.com/users/david1542/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/david1542/subscriptions", "type": "User", "url": "https://api.github.com/users/david1542" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
[ "Thanks for reporting, @david1542." ]
2022-10-16T16:50:08Z
2022-10-19T09:10:08Z
2022-10-19T09:10:08Z
CONTRIBUTOR
null
null
null
## Describe the bug I wanted to install `datasets` dependencies on my M1 (in order to start contributing to the project). However, I got an error regarding `tensorflow`. On M1, `tensorflow-macos` needs to be installed instead. Can we add a conditional requirement, so that `tensorflow-macos` would be installed on M1? ## Steps to reproduce the bug Fresh clone this project (on m1), create a virtualenv and run this: ```python pip install -e ".[dev]" ``` ## Expected results Installation should be smooth, and all the dependencies should be installed on M1. ## Actual results You should receive an error, saying pip couldn't find a version that matches this pattern: ``` tensorflow>=2.3,!=2.6.0,!=2.6.1 ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.6.2.dev0 - Platform: macOS-12.6-arm64-arm-64bit - Python version: 3.9.6 - PyArrow version: 7.0.0 - Pandas version: 1.5.0
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5118/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5118/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5117
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5117/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5117/comments
https://api.github.com/repos/huggingface/datasets/issues/5117/events
https://github.com/huggingface/datasets/issues/5117
1,409,571,346
I_kwDODunzps5UBFoS
5,117
Progress bars have color red and never completed to 100%
{ "avatar_url": "https://avatars.githubusercontent.com/u/63857529?v=4", "events_url": "https://api.github.com/users/echatzikyriakidis/events{/privacy}", "followers_url": "https://api.github.com/users/echatzikyriakidis/followers", "following_url": "https://api.github.com/users/echatzikyriakidis/following{/other_user}", "gists_url": "https://api.github.com/users/echatzikyriakidis/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/echatzikyriakidis", "id": 63857529, "login": "echatzikyriakidis", "node_id": "MDQ6VXNlcjYzODU3NTI5", "organizations_url": "https://api.github.com/users/echatzikyriakidis/orgs", "received_events_url": "https://api.github.com/users/echatzikyriakidis/received_events", "repos_url": "https://api.github.com/users/echatzikyriakidis/repos", "site_admin": false, "starred_url": "https://api.github.com/users/echatzikyriakidis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/echatzikyriakidis/subscriptions", "type": "User", "url": "https://api.github.com/users/echatzikyriakidis" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/9879252?v=4", "events_url": "https://api.github.com/users/david1542/events{/privacy}", "followers_url": "https://api.github.com/users/david1542/followers", "following_url": "https://api.github.com/users/david1542/following{/other_user}", "gists_url": "https://api.github.com/users/david1542/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/david1542", "id": 9879252, "login": "david1542", "node_id": "MDQ6VXNlcjk4NzkyNTI=", "organizations_url": "https://api.github.com/users/david1542/orgs", "received_events_url": "https://api.github.com/users/david1542/received_events", "repos_url": "https://api.github.com/users/david1542/repos", "site_admin": false, "starred_url": "https://api.github.com/users/david1542/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/david1542/subscriptions", "type": "User", "url": "https://api.github.com/users/david1542" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/9879252?v=4", "events_url": "https://api.github.com/users/david1542/events{/privacy}", "followers_url": "https://api.github.com/users/david1542/followers", "following_url": "https://api.github.com/users/david1542/following{/other_user}", "gists_url": "https://api.github.com/users/david1542/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/david1542", "id": 9879252, "login": "david1542", "node_id": "MDQ6VXNlcjk4NzkyNTI=", "organizations_url": "https://api.github.com/users/david1542/orgs", "received_events_url": "https://api.github.com/users/david1542/received_events", "repos_url": "https://api.github.com/users/david1542/repos", "site_admin": false, "starred_url": "https://api.github.com/users/david1542/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/david1542/subscriptions", "type": "User", "url": "https://api.github.com/users/david1542" } ]
null
[ "Hi @echatzikyriakidis, thanks for submitting the issue.\r\nWhich shell are you using exactly? I tried to run the command you sent, but I don't see colors at all 🧐\r\n\r\nI tried from bash and zsh as well.", "Hi @david1542 ,\r\n\r\nI use Google Colab.\r\n", "Got it. I [created a PR](https://github.com/huggingface/datasets/pull/5120) that fixes this issue. Turns out that the wrapping logic for the inner loop was slightly incorrect.", "Thank you!" ]
2022-10-14T16:12:30Z
2022-10-23T12:58:41Z
2022-10-23T12:58:41Z
NONE
null
null
null
## Describe the bug Progress bars after transformative operations turn in red and never be completed to 100% ## Steps to reproduce the bug ```python from datasets import load_dataset load_dataset('rotten_tomatoes', split='test').filter(lambda o: True) ``` ## Expected results Progress bar should be 100% and green ## Actual results Progress bar turn in red and never completed to 100% ## Environment info - `datasets` version: 2.6.1 - Platform: Linux-5.10.133+-x86_64-with-Ubuntu-18.04-bionic - Python version: 3.7.14 - PyArrow version: 6.0.1 - Pandas version: 1.3.5
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5117/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5117/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5116
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5116/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5116/comments
https://api.github.com/repos/huggingface/datasets/issues/5116/events
https://github.com/huggingface/datasets/pull/5116
1,409,549,471
PR_kwDODunzps5A09sk
5,116
Use yaml for issue templates + revamp
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-14T15:53:13Z
2022-10-19T13:05:49Z
2022-10-19T13:03:22Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5116.diff", "html_url": "https://github.com/huggingface/datasets/pull/5116", "merged_at": "2022-10-19T13:03:22Z", "patch_url": "https://github.com/huggingface/datasets/pull/5116.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5116" }
Use YAML instead of markdown (more expressive) for the issue templates. In addition, update their structure/fields to be more aligned with Transformers. PS: also removes the "add_dataset" PR template, as we no longer accept such PRs.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5116/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5116/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5115
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5115/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5115/comments
https://api.github.com/repos/huggingface/datasets/issues/5115/events
https://github.com/huggingface/datasets/pull/5115
1,409,250,020
PR_kwDODunzps5Az9Pm
5,115
Fix iter_batches
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "I also ran the code in https://github.com/huggingface/datasets/issues/5111 and it works fine now :)", "This is ready for review :)" ]
2022-10-14T12:06:14Z
2022-10-14T15:02:15Z
2022-10-14T14:59:58Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5115.diff", "html_url": "https://github.com/huggingface/datasets/pull/5115", "merged_at": "2022-10-14T14:59:58Z", "patch_url": "https://github.com/huggingface/datasets/pull/5115.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5115" }
The `pa.Table.to_reader()` method available in `pyarrow>=8.0.0` may return chunks of size < `max_chunksize`, therefore `iter_batches` can return batches smaller than the `batch_size` specified by the user Therefore batched `map` couldn't always use batches of the right size, e.g. this fails because it runs only on one batch of one element: ```python from datasets import Dataset, concatenate_datasets ds = concatenate_datasets([Dataset.from_dict({"a": [i]}) for i in range(10)]) ds2 = ds.map(lambda _: {}, batched=True) assert list(ds2) == list(ds) ``` This was introduced in https://github.com/huggingface/datasets/pull/5030 Close https://github.com/huggingface/datasets/issues/5111 This will require a patch release along with https://github.com/huggingface/datasets/pull/5113 TODO: - [x] fix tests - [x] add more tests
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5115/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5115/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5114
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5114/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5114/comments
https://api.github.com/repos/huggingface/datasets/issues/5114/events
https://github.com/huggingface/datasets/issues/5114
1,409,236,738
I_kwDODunzps5T_z8C
5,114
load_from_disk with remote filesystem fails due to a wrong temporary local folder path
{ "avatar_url": "https://avatars.githubusercontent.com/u/48770768?v=4", "events_url": "https://api.github.com/users/Hubert-Bonisseur/events{/privacy}", "followers_url": "https://api.github.com/users/Hubert-Bonisseur/followers", "following_url": "https://api.github.com/users/Hubert-Bonisseur/following{/other_user}", "gists_url": "https://api.github.com/users/Hubert-Bonisseur/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Hubert-Bonisseur", "id": 48770768, "login": "Hubert-Bonisseur", "node_id": "MDQ6VXNlcjQ4NzcwNzY4", "organizations_url": "https://api.github.com/users/Hubert-Bonisseur/orgs", "received_events_url": "https://api.github.com/users/Hubert-Bonisseur/received_events", "repos_url": "https://api.github.com/users/Hubert-Bonisseur/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Hubert-Bonisseur/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Hubert-Bonisseur/subscriptions", "type": "User", "url": "https://api.github.com/users/Hubert-Bonisseur" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
null
[ "Hi Hubert! Could you please probably create a publicly available `gs://` dataset link? I think this would be easier for others to directly start to debug.", "What seems to work is to change the line to:\r\n```\r\nfs.download(src_dataset_path, dataset_path.parent.as_posix(), recursive=True)\r\n```" ]
2022-10-14T11:54:53Z
2022-11-19T07:13:10Z
null
NONE
null
null
null
## Describe the bug The function load_from_disk fails when using a remote filesystem because of a wrong temporary path generation in the load_from_disk method of arrow_dataset.py: ```python if is_remote_filesystem(fs): src_dataset_path = extract_path_from_uri(dataset_path) dataset_path = Dataset._build_local_temp_path(src_dataset_path) fs.download(src_dataset_path, dataset_path.as_posix(), recursive=True) ``` If _dataset_path_ is `gs://speech/mydataset/train`, then _src_dataset_path_ will be `speech/mydataset/train` and _dataset_path_ will be something like `/var/folders/9s/gf0b/T/tmp6t/speech/mydataset/train` Then, after downloading the **folder** _src_dataset_path_, you will get a path like `/var/folders/9s/gf0b/T/tmp6t/speech/mydataset/train/train/state.json` (notice we have train twice) Instead of downloading the remote folder we should be downloading all the files in the folder for the path to be right: ```python fs.download(os.path.join(src_dataset_path,*), dataset_path.as_posix(), recursive=True) ``` ## Steps to reproduce the bug ```python fs = gcsfs.GCSFileSystem(**storage_options) dataset = load_from_disk("common_voice_processed") # loading local dataset previously saved locally, works fine dataset.save_to_disk(output_dir, fs=fs) #works fine dataset = load_from_disk(output_dir, fs=fs) # crashes ``` ## Expected results The dataset is loaded ## Actual results FileNotFoundError: [Errno 2] No such file or directory: '/var/folders/9s/gf0b9jz15d517yrf7m3nvlxr0000gn/T/tmp6t5e221_/speech/datasets/tests/common_voice_processed/train/state.json' ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: datasets-2.6.1.dev0 - Platform: mac os monterey 12.5.1 - Python version: 3.8.13 - PyArrow version:pyarrow==9.0.0
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5114/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5114/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5113
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5113/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5113/comments
https://api.github.com/repos/huggingface/datasets/issues/5113/events
https://github.com/huggingface/datasets/pull/5113
1,409,207,607
PR_kwDODunzps5Az0Ei
5,113
Fix filter indices when batched
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "I think a patch release will be necessary.", "I'm also fixing https://github.com/huggingface/datasets/issues/5111 which will lalso require a patch release" ]
2022-10-14T11:30:03Z
2022-10-24T06:21:09Z
2022-10-14T12:11:44Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5113.diff", "html_url": "https://github.com/huggingface/datasets/pull/5113", "merged_at": "2022-10-14T12:11:44Z", "patch_url": "https://github.com/huggingface/datasets/pull/5113.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5113" }
This PR fixes a bug introduced by: - #5030 Fix #5112.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/5113/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5113/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5112
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5112/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5112/comments
https://api.github.com/repos/huggingface/datasets/issues/5112/events
https://github.com/huggingface/datasets/issues/5112
1,409,143,409
I_kwDODunzps5T_dJx
5,112
Bug with filtered indices
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
[ "The issue is here:\r\nhttps://github.com/huggingface/datasets/blob/3ad9644b9a2e4558dd1d0f1e43c67658674e6228/src/datasets/arrow_dataset.py#L2964", "@PartiallyTyped, @Muennighoff: the issue is fixed.\r\n\r\nWe are planning to make a patch release today.", "Thanks a lot for the swift response! For a brief moment yesterday I thought I had gone insane 🤣On 14 Oct 2022, at 15:44, Albert Villanova del Moral ***@***.***> wrote:\n@PartiallyTyped, @Muennighoff: the issue is fixed.\nWe are planning to make a patch release today.\n\n—Reply to this email directly, view it on GitHub, or unsubscribe.You are receiving this because you were mentioned.Message ID: ***@***.***>" ]
2022-10-14T10:35:47Z
2022-10-14T13:55:03Z
2022-10-14T12:11:45Z
MEMBER
null
null
null
## Describe the bug As reported by @PartiallyTyped (and by @Muennighoff): - https://github.com/huggingface/datasets/issues/5111#issuecomment-1278652524 There is an issue with the indices of a filtered dataset. ## Steps to reproduce the bug ```python ds = Dataset.from_dict({"num": [0, 1, 2, 3]}) ds = ds.filter(lambda num: num % 2 == 0, input_columns="num", batch_size=2) assert all(item["num"] % 2 == 0 for item in ds) ``` ## Expected results The indices of the filtered dataset should correspond to the examples with "language" equals to "english". ## Actual results Indices to items with other languages are included in the filtered dataset indices ## Preliminar investigation It seems a bug introduced by: - #5030
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/5112/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5112/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5111
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5111/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5111/comments
https://api.github.com/repos/huggingface/datasets/issues/5111/events
https://github.com/huggingface/datasets/issues/5111
1,408,143,170
I_kwDODunzps5T7o9C
5,111
map and filter not working properly in multiprocessing with the new release 2.6.0
{ "avatar_url": "https://avatars.githubusercontent.com/u/44069155?v=4", "events_url": "https://api.github.com/users/loubnabnl/events{/privacy}", "followers_url": "https://api.github.com/users/loubnabnl/followers", "following_url": "https://api.github.com/users/loubnabnl/following{/other_user}", "gists_url": "https://api.github.com/users/loubnabnl/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/loubnabnl", "id": 44069155, "login": "loubnabnl", "node_id": "MDQ6VXNlcjQ0MDY5MTU1", "organizations_url": "https://api.github.com/users/loubnabnl/orgs", "received_events_url": "https://api.github.com/users/loubnabnl/received_events", "repos_url": "https://api.github.com/users/loubnabnl/repos", "site_admin": false, "starred_url": "https://api.github.com/users/loubnabnl/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/loubnabnl/subscriptions", "type": "User", "url": "https://api.github.com/users/loubnabnl" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" } ]
null
[ "Same bug exists with `num_proc=1` on colab. `3.7.14 (default, Sep 8 2022, 00:06:44) [GCC 7.5.0]` ", "Thanks for reporting, @loubnabnl and for the additional information, @PartiallyTyped.\r\n\r\nHowever, I'm not able to reproduce this issue, neither locally nor on Colab:\r\n```\r\nDataset({\r\n features: ['repo_name', 'path', 'copies', 'size', 'content', 'license', 'hash', 'line_mean', 'line_max', 'alpha_frac', 'autogenerated'],\r\n num_rows: 10\r\n})\r\nDataset({\r\n features: ['repo_name', 'path', 'copies', 'size', 'content', 'license', 'hash', 'line_mean', 'line_max', 'alpha_frac', 'autogenerated'],\r\n num_rows: 10\r\n})\r\n```\r\nCC: @huggingface/datasets can anybody reproduce this?", "This is the minimum reproducible example. I ran this on the premium instances of colab.\r\n\r\n```\r\n# !pip install datasets\r\nimport datasets\r\nfrom datasets import load_dataset\r\nds = load_dataset(\"copenlu/answerable_tydiqa\").filter(\"english\".__eq__, input_columns=\"language\")\r\nassert all(map(\"english\".__eq__, ds[\"train\"][\"language\"]))\r\n```\r\n\r\nIn my case, the number of samples is correct, however, the samples selected when indexing are wrong.\r\n\r\n```python\r\nDatasetDict({\r\n validation: Dataset({\r\n features: ['question_text', 'document_title', 'language', 'annotations', 'document_plaintext', 'document_url'],\r\n num_rows: 990\r\n })\r\n train: Dataset({\r\n features: ['question_text', 'document_title', 'language', 'annotations', 'document_plaintext', 'document_url'],\r\n num_rows: 7389\r\n })\r\n})\r\n```\r\n\r\nThe number of rows is indeed correct, and i have checked it with a version that works.", "I can reproduce the issue on my mac too \r\n```\r\n- `datasets` version: 2.6.0\r\n- Platform: macOS-12.2.1-arm64-arm-64bit\r\n- Python version: 3.9.13\r\n- PyArrow version: 9.0.0\r\n- Pandas version: 1.4.3\r\n```\r\nBut not on Colab with python 3.7, maybe related to python version? (didn't manage to install python 3.9)\r\n```\r\n- `datasets` version: 2.6.0\r\n- Platform: Linux-5.10.133+-x86_64-with-Ubuntu-18.04-bionic\r\n- Python version: 3.7.14\r\n- PyArrow version: 9.0.0\r\n- Pandas version: 1.3.5\r\n```", "I have the same issue, here's a simple notebook to reproduce: https://colab.research.google.com/drive/1Lvo9fg5DSpGUUgXW5JAutZ0bFsR-WV--?usp=sharing\r\n\r\n\r\n\r\n", "I think there are 2 different issues here:\r\n- the one reported by @loubnabnl is related to multiprocessing in map and then filter; we should reproduce it first: I have tried with Python version 3.9.7 and I can't reproduce it either; maybe it is related to the version of PyArrow? To be checked.\r\n- the issue reported by @PartiallyTyped is related just to \"filter\" (without multiprocessing) and I can reproduce it.", "Could you create another issue for the @PartiallyTyped one please ?\r\n\r\nRegarding the OP issue, I also tried on colab or locally on py3.7 or py3.10 but didn't reproduce", "I have created another issue for the one reported by @PartiallyTyped: \r\n- #5112 ", "I managed to reproduce your issue @loubnabnl on colab by upgrading pyarrow to 9.0.0 instead of 6.0.1", "I managed to have a _super_ minimal reproducible example:\r\n```python\r\n\r\nfrom datasets import Dataset, concatenate_datasets\r\n\r\nds = concatenate_datasets([Dataset.from_dict({\"a\": [i]}) for i in range(10)])\r\nds2 = ds.map(lambda _: {}, batched=True)\r\nassert list(ds2) == list(ds)\r\n```\r\n(filter uses a batched `map` under the hood)", "> the one reported by @loubnabnl is related to multiprocessing in map and then filter; we should reproduce it first: I have tried with Python version 3.9.7 and I can't reproduce it either; maybe it is related to the version of PyArrow? To be checked.\r\n\r\nSo finally it was related to PyArrow version! :+1: ", "Doing a patch release asap :)", "Did the patch release yesterday, lmk if you still have issues", "It works now, thanks!\r\n" ]
2022-10-13T17:00:55Z
2022-10-17T08:26:59Z
2022-10-14T14:59:59Z
NONE
null
null
null
## Describe the bug When mapping is used on a dataset with more than one process, there is a weird behavior when trying to use `filter` , it's like only the samples from one worker are retrieved, one needs to specify the same `num_proc` in filter for it to work properly. This doesn't happen with `datasets` version 2.5.2 In the code below the data is filtered differently when we increase `num_proc` used in `map` although the datsets before and after mapping have identical elements. ## Steps to reproduce the bug ```python import datasets from datasets import load_dataset def preprocess(example): return example ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train").select([i for i in range(10)]) ds1 = ds.map(preprocess, num_proc=2) ds2 = ds.map(preprocess) # the datasets elements are the same for i in range(len(ds1)): assert ds1[i]==ds2[i] print(f'Target column before filtering {ds1["autogenerated"]}') print(f'Target column before filtering {ds2["autogenerated"]}') print(f"datasets version {datasets.__version__}") ds_filtered_1 = ds1.filter(lambda x: not x["autogenerated"]) ds_filtered_2 = ds2.filter(lambda x: not x["autogenerated"]) # all elements in Target column are false so they should all be kept, but for ds2 only the first 5=num_samples/num_proc are kept print(ds_filtered_1) print(ds_filtered_2) ``` ``` Target column before filtering [False, False, False, False, False, False, False, False, False, False] Target column before filtering [False, False, False, False, False, False, False, False, False, False] Dataset({ features: ['repo_name', 'path', 'copies', 'size', 'content', 'license', 'hash', 'line_mean', 'line_max', 'alpha_frac', 'autogenerated'], num_rows: 5 }) Dataset({ features: ['repo_name', 'path', 'copies', 'size', 'content', 'license', 'hash', 'line_mean', 'line_max', 'alpha_frac', 'autogenerated'], num_rows: 10 }) ``` ## Expected results Increasing `num_proc` in mapping shouldn't alter filtering. With the previous version 2.5.2 this doesn't happen ## Actual results Filtering doesn't work properly when we increase `num_proc` in mapping but not when calling `filter` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.6.0 - Platform: Linux-4.19.0-22-cloud-amd64-x86_64-with-glibc2.28 - Python version: 3.9.13 - PyArrow version: 8.0.0 - Pandas version: 1.4.2
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/5111/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5111/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5109
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5109/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5109/comments
https://api.github.com/repos/huggingface/datasets/issues/5109/events
https://github.com/huggingface/datasets/issues/5109
1,407,434,706
I_kwDODunzps5T47_S
5,109
Map caching not working for some class methods
{ "avatar_url": "https://avatars.githubusercontent.com/u/23029765?v=4", "events_url": "https://api.github.com/users/Mouhanedg56/events{/privacy}", "followers_url": "https://api.github.com/users/Mouhanedg56/followers", "following_url": "https://api.github.com/users/Mouhanedg56/following{/other_user}", "gists_url": "https://api.github.com/users/Mouhanedg56/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Mouhanedg56", "id": 23029765, "login": "Mouhanedg56", "node_id": "MDQ6VXNlcjIzMDI5NzY1", "organizations_url": "https://api.github.com/users/Mouhanedg56/orgs", "received_events_url": "https://api.github.com/users/Mouhanedg56/received_events", "repos_url": "https://api.github.com/users/Mouhanedg56/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Mouhanedg56/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Mouhanedg56/subscriptions", "type": "User", "url": "https://api.github.com/users/Mouhanedg56" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
[ "The hash used for caching is computed by pickling recursively the function passed to `map`. Maybe some objects don't have the same hash across sessions. In particular you can check the hash of your model using\r\n```python\r\nfrom datasets.fingerprint import Hasher\r\nobj = AutoModel.from_config(config=config, add_pooling_layer=False)\r\nprint(Hasher.hash(obj))\r\n```\r\n\r\nYou can find mode info here: https://huggingface.co/docs/datasets/about_cache\r\n\r\nYou can also provide your own unique hash in `map` if you want, with the `new_fingerprint` argument", "Indeed, the hash is changing. The `dumps` function serialize the model object in different ways because the model object is not deterministic\r\n```python\r\nfrom datasets.utils.py_utils import dumps\r\nobj1 = AutoModel.from_config(config=config, add_pooling_layer=False)\r\nobj2 = AutoModel.from_config(config=config, add_pooling_layer=False)\r\n\r\ndumps(bert) == dumps(bert2). # False\r\n```\r\n\r\n> You can find mode info here: https://huggingface.co/docs/datasets/about_cache\r\n> \r\n> You can also provide your own unique hash in map if you want, with the new_fingerprint argument\r\n\r\n\r\nThanks, the doc is so helpful. Indeed, we can fix the hash and get cache hit using `new_fingerprint`. Closing the issue." ]
2022-10-13T09:12:58Z
2022-10-17T10:38:45Z
2022-10-17T10:38:45Z
CONTRIBUTOR
null
null
null
## Describe the bug The cache loading is not working as expected for some class methods with a model stored in an attribute. The new fingerprint for `_map_single` is not the same at each run. The hasher generate a different hash for the class method. This comes from `dumps` function in `datasets.utils.py_utils` which generates a different dump at each run. ## Steps to reproduce the bug ```python from datasets import load_dataset from transformers import AutoConfig, AutoModel, AutoTokenizer dataset = load_dataset("ethos", "binary") BASE_MODELNAME = "sentence-transformers/all-MiniLM-L6-v2" class Object: def __init__(self): config = AutoConfig.from_pretrained(BASE_MODELNAME) self.bert = AutoModel.from_config(config=config, add_pooling_layer=False) self.tok = AutoTokenizer.from_pretrained(BASE_MODELNAME) def tokenize(self, examples): tokenized_texts = self.tok( examples["text"], padding="max_length", truncation=True, max_length=256, ) return tokenized_texts instance = Object() result = dict() for phase in ["train"]: result[phase] = dataset[phase].map(instance.tokenize, batched=True, load_from_cache_file=True, num_proc=2) ``` ## Expected results Load cache instead of recompute result. ## Actual results Result recomputed from scratch at each run. The cache works fine when deleting `bert` attribute. ## Environment info - `datasets` version: 2.5.3.dev0 - Platform: macOS-10.16-x86_64-i386-64bit - Python version: 3.9.13 - PyArrow version: 7.0.0 - Pandas version: 1.5.0
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5109/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5109/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5108
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5108/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5108/comments
https://api.github.com/repos/huggingface/datasets/issues/5108/events
https://github.com/huggingface/datasets/pull/5108
1,407,044,107
PR_kwDODunzps5AskeK
5,108
Fix a typo in arrow_dataset.py
{ "avatar_url": "https://avatars.githubusercontent.com/u/5431913?v=4", "events_url": "https://api.github.com/users/yangky11/events{/privacy}", "followers_url": "https://api.github.com/users/yangky11/followers", "following_url": "https://api.github.com/users/yangky11/following{/other_user}", "gists_url": "https://api.github.com/users/yangky11/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yangky11", "id": 5431913, "login": "yangky11", "node_id": "MDQ6VXNlcjU0MzE5MTM=", "organizations_url": "https://api.github.com/users/yangky11/orgs", "received_events_url": "https://api.github.com/users/yangky11/received_events", "repos_url": "https://api.github.com/users/yangky11/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yangky11/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yangky11/subscriptions", "type": "User", "url": "https://api.github.com/users/yangky11" }
[]
closed
false
null
[]
null
[]
2022-10-13T02:33:55Z
2022-10-14T09:47:28Z
2022-10-14T09:47:27Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5108.diff", "html_url": "https://github.com/huggingface/datasets/pull/5108", "merged_at": "2022-10-14T09:47:27Z", "patch_url": "https://github.com/huggingface/datasets/pull/5108.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5108" }
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5108/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5108/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5107
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5107/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5107/comments
https://api.github.com/repos/huggingface/datasets/issues/5107/events
https://github.com/huggingface/datasets/pull/5107
1,406,736,710
PR_kwDODunzps5ArjCZ
5,107
Multiprocessed dataset builder
{ "avatar_url": "https://avatars.githubusercontent.com/u/26709476?v=4", "events_url": "https://api.github.com/users/TevenLeScao/events{/privacy}", "followers_url": "https://api.github.com/users/TevenLeScao/followers", "following_url": "https://api.github.com/users/TevenLeScao/following{/other_user}", "gists_url": "https://api.github.com/users/TevenLeScao/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/TevenLeScao", "id": 26709476, "login": "TevenLeScao", "node_id": "MDQ6VXNlcjI2NzA5NDc2", "organizations_url": "https://api.github.com/users/TevenLeScao/orgs", "received_events_url": "https://api.github.com/users/TevenLeScao/received_events", "repos_url": "https://api.github.com/users/TevenLeScao/repos", "site_admin": false, "starred_url": "https://api.github.com/users/TevenLeScao/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/TevenLeScao/subscriptions", "type": "User", "url": "https://api.github.com/users/TevenLeScao" }
[]
closed
false
null
[]
null
[ "I would also like to add a test, but am not sure whether it should go into `test_builder` (more natural imo) or `test_load` (which already contains a lot of the things I have to import to run my current testing setup). For reference, what I run to test that it works looks like:\r\n\r\n```\r\nimport os\r\nfrom pathlib import Path\r\nimport shutil\r\n\r\nimport datasets\r\nfrom datasets.builder import DatasetBuilder\r\nfrom datasets.features import Features, Value\r\n\r\nDATASET_LOADING_SCRIPT_NAME = \"__dummy_dataset1__\"\r\n\r\nDATASET_LOADING_SCRIPT_CODE = \"\"\"\r\nimport os\r\n\r\nimport datasets\r\nfrom datasets import DatasetInfo, Features, Split, SplitGenerator, Value\r\n\r\n\r\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\r\n\r\n def _info(self) -> DatasetInfo:\r\n return DatasetInfo(features=Features({\"text\": Value(\"string\")}))\r\n\r\n def _split_generators(self, dl_manager):\r\n return [\r\n SplitGenerator(Split.TRAIN, gen_kwargs={\"filepaths\": [os.path.join(dl_manager.manual_dir, \"train1.txt\"), os.path.join(dl_manager.manual_dir, \"train2.txt\")]}),\r\n SplitGenerator(Split.TEST, gen_kwargs={\"filepaths\": [os.path.join(dl_manager.manual_dir, \"test.txt\")]}),\r\n ]\r\n\r\n def _generate_examples(self, filepaths, **kwargs):\r\n idx = 0\r\n for filepath in filepaths:\r\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\r\n for line in f:\r\n yield idx, {\"text\": line.strip()}\r\n idx += 1\r\n\"\"\"\r\n\r\n\r\ndef dataset_loading_script_dir(tmp_path):\r\n script_name = DATASET_LOADING_SCRIPT_NAME\r\n script_dir = tmp_path / script_name\r\n script_dir.mkdir()\r\n script_path = script_dir / f\"{script_name}.py\"\r\n with open(script_path, \"w\") as f:\r\n f.write(DATASET_LOADING_SCRIPT_CODE)\r\n return str(script_dir)\r\n\r\n\r\ndef data_dir(tmp_path):\r\n data_dir = tmp_path / \"data_dir\"\r\n data_dir.mkdir()\r\n with open(data_dir / \"train1.txt\", \"w\") as f:\r\n f.write(\"foo\\n\" * 10)\r\n with open(data_dir / \"train2.txt\", \"w\") as f:\r\n f.write(\"foo\\n\" * 10)\r\n with open(data_dir / \"test.txt\", \"w\") as f:\r\n f.write(\"bar\\n\" * 10)\r\n return str(data_dir)\r\n\r\n\r\ndef load_dataset_builder_multiprocessed(tmp_path):\r\n builder = datasets.load_dataset_builder(\r\n os.path.join(dataset_loading_script_dir(tmp_path), DATASET_LOADING_SCRIPT_NAME + \".py\"),\r\n data_dir=data_dir(tmp_path),\r\n )\r\n assert isinstance(builder, DatasetBuilder)\r\n assert builder.name == DATASET_LOADING_SCRIPT_NAME\r\n assert builder.info.features == Features({\"text\": Value(\"string\")})\r\n builder.download_and_prepare(tmp_path / \"prepare_target\", max_shard_size=500, num_proc=2)\r\n\r\nif __name__ == \"__main__\":\r\n tmp_path = \"tmp\"\r\n if os.path.exists(tmp_path):\r\n raise FileExistsError(f\"path {tmp_path} already exists\")\r\n os.makedirs(tmp_path)\r\n try:\r\n load_dataset_builder_multiprocessed(Path(tmp_path))\r\n finally:\r\n # pass\r\n shutil.rmtree(tmp_path)\r\n```", "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_5107). All of your documentation changes will be reflected on that endpoint.", "Nice ! I think the test can go in `test_builder.py` :)", "I've added sharded arrow dataset loading. Two WIP items in the PR:\r\n- ~~Order is not conserved (it seems like the sharded files are read in the wrong order)~~\r\n- the tqdm for preparing the splits is wrong (it compares against the size of the whole split rather than against the size of the multiprocessing shard, but I am not sure how to access the latter)\r\n\r\nAlso `naming.filenames_for_dataset_split` is not very elegant imo.\r\n\r\n@lvwerra if you don't care about order, as I do, it's functional for now but I'd still quite like to get to the bottom of this.", "Found the ordering bug ! (`glob.glob` returning stuff in arbitrary order)", "I fixed the tqdm to be less misleading, but it can't tell where to stop. I am a bit hesitant to add a top-level tqdm (on the shard iterator) since for most intents it will do 0 -> N shards straight, but I am not sure what is the best way to present that info here.", "I'm continuing the PR :)", "Did a few changes:\r\n- make shards naming consistent:\r\n - use `{builder_name}-{split_name}.{file_format}` when there's only 1 shard\r\n - otherwise use `{builder_name}-{split_name}-{shard_idx:05d}-of-{num_shards:05d}.{file_format}`\r\n- update the reader to support reading several shards\r\n - added a new `shard_lengths` field in `SplitInfo` (FYI it is saved in `dataset_info.json` next to the shards as usual)\r\n - it's None when there's only 1 shard\r\n - otherwise it's a list of integers that correspond to the number of rows per shard\r\n - implemented partial reading to only memory map the required shards\r\n - e.g. when someone asks for a partial split like `train[:10%]`\r\n- align the sharding for beam datasets\r\n - no more combining into 1 big arrow file\r\n- added a tqdm bar\r\n - only one single bar, handled by the main process\r\n - gathers progress updates from other processes using `iflatmap_unordered`\r\n - shows the number of examples (even for datasets prepared by generating arrow tables)\r\n- disabled multiprocessing by default - users must pass `num_proc` explicitly\r\n- tests\r\n- docs", "Alright this is ready for review - sorry it ended up so big ^^'\r\n\r\nIf I can do anything to make it easier for your to review this PR @mariosasko let me know", "Multiprocessing is disabled by default but we may show a warning to encourage users to pass `num_proc` if the dataset is split in many files. Let me know what you think", "Hey, is this error seems to you guys natural? \r\n\r\nThe package built from `0d4e3907` commit tag, and here is the version displayed from the import ... \r\n```bash\r\n>>> datasets.__version__\r\n'2.6.1.dev0'\r\n>>> \r\n```\r\n\r\n```bash\r\n>>> data = load_dataset('dataset_loaders/rfw2latentplay', num_proc=14)\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/somewhere//mambaforge/envs/datasets/lib/python3.8/site-packages/datasets/load.py\", line 1719, in load_dataset\r\n builder_instance = load_dataset_builder(\r\n File \"/somewhere//mambaforge/envs/datasets/lib/python3.8/site-packages/datasets/load.py\", line 1523, in load_dataset_builder\r\n builder_instance: DatasetBuilder = builder_cls(\r\n File \"/somewhere//mambaforge/envs/datasets/lib/python3.8/site-packages/datasets/builder.py\", line 1292, in __init__\r\n super().__init__(*args, **kwargs)\r\n File \"/somewhere//mambaforge/envs/datasets/lib/python3.8/site-packages/datasets/builder.py\", line 303, in __init__\r\n self.config, self.config_id = self._create_builder_config(\r\n File \"/somewhere//mambaforge/envs/datasets/lib/python3.8/site-packages/datasets/builder.py\", line 456, in _create_builder_config\r\n builder_config = self.BUILDER_CONFIG_CLASS(**config_kwargs)\r\nTypeError: __init__() got an unexpected keyword argument 'num_proc'\r\n```\r\n\r\nLet me know if I can help fixing this ... \r\n", "> Do we have some benchmarks to see the speed-up?\r\n\r\nOn my machine running `load_dataset(\"oscar-corpus/OSCAR-2201\", \"br\")` (which is split in shards) I go from 2-3k examples per sec to 4-5k examples per sec with num_proc=2 😉", "> Hey, is this error seems to you guys natural?\r\n>\r\n> The package built from 0d4e3907 commit tag, and here is the version displayed from the import ...\r\n\r\nI don't know where you got the `0d4e3907` commit tag from, it doesn't seem to be in this PR. You should try installing from this PR, or wait for it to be merged on `main`", "## Splits vs Shards\r\n\r\nMaybe it's a good idea to add some documentation on the `sharding` that can be achieved by passing `list` based arguments to the `SplitGenerator`s `gen_kwargs` ... \r\n\r\nI had to read the whole dataset generation source code to find this out ... \r\n\r\n\r\n", "> Maybe it's a good idea to add some documentation on the sharding that can be achieved by passing list based arguments to the SplitGenerators gen_kwargs ...\r\n\r\nThis is part of this PR :) you can check the changes in docs/source/dataset_script.mdx", "I took your comments into account @mariosasko thanks !\r\nLet me know if it's good for you now ;)", "The doc CI should be fixed by now hopefully, merging !" ]
2022-10-12T19:59:17Z
2022-12-01T15:37:09Z
2022-11-09T17:11:43Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5107.diff", "html_url": "https://github.com/huggingface/datasets/pull/5107", "merged_at": "2022-11-09T17:11:43Z", "patch_url": "https://github.com/huggingface/datasets/pull/5107.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5107" }
This PR adds the multiprocessing part of #2650 (but not the caching of already-computed arrow files). On the other side, loading of sharded arrow files still needs to be implemented (sharded parquet files can already be loaded).
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5107/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5107/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5106
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5106/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5106/comments
https://api.github.com/repos/huggingface/datasets/issues/5106/events
https://github.com/huggingface/datasets/pull/5106
1,406,635,758
PR_kwDODunzps5ArM6G
5,106
Fix task template reload from dict
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "> Just wondering if there might be other data classes default values missed that could cause an issue... Apart from feature-like classes and tasks, I don't see any others though...\r\n\r\nI think we're good ! `asdict` is used on the DatasetInfo attributes like features, tasks etc. and they all support dict conversion properly now\r\n\r\n> And a question: but this information about the tasks is no longer being saved as YAML tags in the dataset card; won't be a problem with current datasets using task templates (with this information in their metadata JSON) once we replace the JSON by the YAML tags (which do not have this information about the task templates)?\r\n\r\nIn the long run we'll use the train_eval_index YAML tags instead, but I agree when removing the JSON files we should try to not break existing code that may rely on this" ]
2022-10-12T18:33:49Z
2022-10-13T09:59:07Z
2022-10-13T09:56:51Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5106.diff", "html_url": "https://github.com/huggingface/datasets/pull/5106", "merged_at": "2022-10-13T09:56:51Z", "patch_url": "https://github.com/huggingface/datasets/pull/5106.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5106" }
Since #4926 the JSON dumps are simplified and it made task template dicts empty by default. I fixed this by always including the task name which is needed to reload a task from a dict
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5106/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5106/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5105
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5105/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5105/comments
https://api.github.com/repos/huggingface/datasets/issues/5105/events
https://github.com/huggingface/datasets/issues/5105
1,406,078,357
I_kwDODunzps5Tzw2V
5,105
Specifying an exisiting folder in download_and_prepare deletes everything in it
{ "avatar_url": "https://avatars.githubusercontent.com/u/3664563?v=4", "events_url": "https://api.github.com/users/cakiki/events{/privacy}", "followers_url": "https://api.github.com/users/cakiki/followers", "following_url": "https://api.github.com/users/cakiki/following{/other_user}", "gists_url": "https://api.github.com/users/cakiki/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/cakiki", "id": 3664563, "login": "cakiki", "node_id": "MDQ6VXNlcjM2NjQ1NjM=", "organizations_url": "https://api.github.com/users/cakiki/orgs", "received_events_url": "https://api.github.com/users/cakiki/received_events", "repos_url": "https://api.github.com/users/cakiki/repos", "site_admin": false, "starred_url": "https://api.github.com/users/cakiki/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cakiki/subscriptions", "type": "User", "url": "https://api.github.com/users/cakiki" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
null
[ "cc @lhoestq ", "Thanks for reporting, @cakiki.\r\n\r\nI would say the deletion of the dir is an expected behavior though...", "`dask.to_parquet` has an \"overwrite\" parameter and default is `False`, we could also have something similar", "Thank you both for your feedback!\r\n\r\n@albertvillanova I think I might have have the wrong mental model of what the function was meant to do. I thought it would be an API similar to the pandas `to_XX` write methods (Like the one @lhoestq mentions) so I just assumed it would download the dataframe to whichever folder I specififed (`\"./\"` in my case) so I could load it into a dask dataframe. I absolutely did not expect it to delete everything in my local directory, including the script where I called it from :smile: \r\n\r\nI think Quentin's proposed solution sounds like a reasonable feature!", "actually there's already a `download_mode` parameter that defaults to `REUSE_DATASET_IF_EXISTS` - so I guess it's just a matter of not deleting files unrelated to the dataset, and to overwrite existing dataset files if the download mode is `REUSE_CACHE_IF_EXISTS` or `FORCE_REDOWNLOAD`" ]
2022-10-12T11:53:33Z
2022-10-20T11:53:59Z
null
CONTRIBUTOR
null
null
null
## Describe the bug The builder correctly creates the `output_dir` folder if it doesn't exist, but if the folder exists everything within it is deleted. Specifying `"."` as the `output_dir` deletes everything in your current dir but also leads to **another bug** whose traceback is the following: ``` Traceback (most recent call last) Input In [11], in <cell line: 1>() ----> 1 rotten_tomatoes_builder.download_and_prepare(output_dir=".", max_shard_size="200MB", file_format="parquet") File ~/BIGSCIENCE/env/lib/python3.9/site-packages/datasets/builder.py:818, in download_and_prepare(self, output_dir, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, storage_options, **download_and_prepare_kwargs) File /usr/lib/python3.9/contextlib.py:124, in _GeneratorContextManager.__exit__(self, type, value, traceback) 122 if type is None: 123 try: --> 124 next(self.gen) 125 except StopIteration: 126 return False File ~/BIGSCIENCE/env/lib/python3.9/site-packages/datasets/builder.py:760, in incomplete_dir(dirname) File /usr/lib/python3.9/shutil.py:722, in rmtree(path, ignore_errors, onerror) 720 os.rmdir(path) 721 except OSError: --> 722 onerror(os.rmdir, path, sys.exc_info()) 723 else: 724 try: 725 # symlinks to directories are forbidden, see bug #1669 File /usr/lib/python3.9/shutil.py:720, in rmtree(path, ignore_errors, onerror) 718 _rmtree_safe_fd(fd, path, onerror) 719 try: --> 720 os.rmdir(path) 721 except OSError: 722 onerror(os.rmdir, path, sys.exc_info()) OSError: [Errno 22] Invalid argument: '/home/christopher/BIGSCIENCE/.' ``` ## Steps to reproduce the bug ```python rotten_tomatoes_builder = load_dataset_builder("rotten_tomatoes") rotten_tomatoes_builder.download_and_prepare(output_dir="./test_folder", max_shard_size="200MB", file_format="parquet") ``` If `test_folder` contains any files they will all be deleted ## Expected results Either a warning that all files will be deleted, but preferably that they not be deleted at all. ## Actual results N/A ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.3.2 - Platform: Linux-5.15.0-48-generic-x86_64-with-glibc2.29 - Python version: 3.8.10 - PyArrow version: 8.0.0 - Pandas version: 1.4.3
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5105/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5105/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5104
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5104/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5104/comments
https://api.github.com/repos/huggingface/datasets/issues/5104/events
https://github.com/huggingface/datasets/pull/5104
1,405,973,102
PR_kwDODunzps5Ao9Mq
5,104
Fix loading how to guide (#5102)
{ "avatar_url": "https://avatars.githubusercontent.com/u/9295277?v=4", "events_url": "https://api.github.com/users/riccardobucco/events{/privacy}", "followers_url": "https://api.github.com/users/riccardobucco/followers", "following_url": "https://api.github.com/users/riccardobucco/following{/other_user}", "gists_url": "https://api.github.com/users/riccardobucco/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/riccardobucco", "id": 9295277, "login": "riccardobucco", "node_id": "MDQ6VXNlcjkyOTUyNzc=", "organizations_url": "https://api.github.com/users/riccardobucco/orgs", "received_events_url": "https://api.github.com/users/riccardobucco/received_events", "repos_url": "https://api.github.com/users/riccardobucco/repos", "site_admin": false, "starred_url": "https://api.github.com/users/riccardobucco/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/riccardobucco/subscriptions", "type": "User", "url": "https://api.github.com/users/riccardobucco" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-12T10:34:42Z
2022-10-12T11:34:07Z
2022-10-12T11:31:55Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5104.diff", "html_url": "https://github.com/huggingface/datasets/pull/5104", "merged_at": "2022-10-12T11:31:55Z", "patch_url": "https://github.com/huggingface/datasets/pull/5104.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5104" }
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5104/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5104/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5103
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5103/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5103/comments
https://api.github.com/repos/huggingface/datasets/issues/5103/events
https://github.com/huggingface/datasets/pull/5103
1,405,956,311
PR_kwDODunzps5Ao5gI
5,103
url encode hub url (#5099)
{ "avatar_url": "https://avatars.githubusercontent.com/u/9295277?v=4", "events_url": "https://api.github.com/users/riccardobucco/events{/privacy}", "followers_url": "https://api.github.com/users/riccardobucco/followers", "following_url": "https://api.github.com/users/riccardobucco/following{/other_user}", "gists_url": "https://api.github.com/users/riccardobucco/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/riccardobucco", "id": 9295277, "login": "riccardobucco", "node_id": "MDQ6VXNlcjkyOTUyNzc=", "organizations_url": "https://api.github.com/users/riccardobucco/orgs", "received_events_url": "https://api.github.com/users/riccardobucco/received_events", "repos_url": "https://api.github.com/users/riccardobucco/repos", "site_admin": false, "starred_url": "https://api.github.com/users/riccardobucco/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/riccardobucco/subscriptions", "type": "User", "url": "https://api.github.com/users/riccardobucco" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-12T10:22:12Z
2022-10-12T15:27:24Z
2022-10-12T15:24:47Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5103.diff", "html_url": "https://github.com/huggingface/datasets/pull/5103", "merged_at": "2022-10-12T15:24:47Z", "patch_url": "https://github.com/huggingface/datasets/pull/5103.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5103" }
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5103/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5103/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5102
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5102/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5102/comments
https://api.github.com/repos/huggingface/datasets/issues/5102/events
https://github.com/huggingface/datasets/issues/5102
1,404,746,554
I_kwDODunzps5Turs6
5,102
Error in create a dataset from a Python generator
{ "avatar_url": "https://avatars.githubusercontent.com/u/9004682?v=4", "events_url": "https://api.github.com/users/yangxuhui/events{/privacy}", "followers_url": "https://api.github.com/users/yangxuhui/followers", "following_url": "https://api.github.com/users/yangxuhui/following{/other_user}", "gists_url": "https://api.github.com/users/yangxuhui/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yangxuhui", "id": 9004682, "login": "yangxuhui", "node_id": "MDQ6VXNlcjkwMDQ2ODI=", "organizations_url": "https://api.github.com/users/yangxuhui/orgs", "received_events_url": "https://api.github.com/users/yangxuhui/received_events", "repos_url": "https://api.github.com/users/yangxuhui/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yangxuhui/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yangxuhui/subscriptions", "type": "User", "url": "https://api.github.com/users/yangxuhui" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" }, { "color": "7057ff", "default": true, "description": "Good for newcomers", "id": 1935892877, "name": "good first issue", "node_id": "MDU6TGFiZWwxOTM1ODkyODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue" }, { "color": "DF8D62", "default": false, "description": "", "id": 4614514401, "name": "hacktoberfest", "node_id": "LA_kwDODunzps8AAAABEwvm4Q", "url": "https://api.github.com/repos/huggingface/datasets/labels/hacktoberfest" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/9295277?v=4", "events_url": "https://api.github.com/users/riccardobucco/events{/privacy}", "followers_url": "https://api.github.com/users/riccardobucco/followers", "following_url": "https://api.github.com/users/riccardobucco/following{/other_user}", "gists_url": "https://api.github.com/users/riccardobucco/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/riccardobucco", "id": 9295277, "login": "riccardobucco", "node_id": "MDQ6VXNlcjkyOTUyNzc=", "organizations_url": "https://api.github.com/users/riccardobucco/orgs", "received_events_url": "https://api.github.com/users/riccardobucco/received_events", "repos_url": "https://api.github.com/users/riccardobucco/repos", "site_admin": false, "starred_url": "https://api.github.com/users/riccardobucco/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/riccardobucco/subscriptions", "type": "User", "url": "https://api.github.com/users/riccardobucco" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/9295277?v=4", "events_url": "https://api.github.com/users/riccardobucco/events{/privacy}", "followers_url": "https://api.github.com/users/riccardobucco/followers", "following_url": "https://api.github.com/users/riccardobucco/following{/other_user}", "gists_url": "https://api.github.com/users/riccardobucco/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/riccardobucco", "id": 9295277, "login": "riccardobucco", "node_id": "MDQ6VXNlcjkyOTUyNzc=", "organizations_url": "https://api.github.com/users/riccardobucco/orgs", "received_events_url": "https://api.github.com/users/riccardobucco/received_events", "repos_url": "https://api.github.com/users/riccardobucco/repos", "site_admin": false, "starred_url": "https://api.github.com/users/riccardobucco/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/riccardobucco/subscriptions", "type": "User", "url": "https://api.github.com/users/riccardobucco" } ]
null
[ "Hi, thanks for reporting! The last line should be `dataset = Dataset.from_generator(my_gen)`.", "Can I work on this one?" ]
2022-10-11T14:28:58Z
2022-10-12T11:31:56Z
2022-10-12T11:31:56Z
NONE
null
null
null
## Describe the bug In HOW-TO-GUIDES > Load > [Python generator](https://huggingface.co/docs/datasets/v2.5.2/en/loading#python-generator), the code example defines the `my_gen` function, but when creating the dataset, an undefined `my_dict` is passed in. ```Python >>> from datasets import Dataset >>> def my_gen(): ... for i in range(1, 4): ... yield {"a": i} >>> dataset = Dataset.from_generator(my_dict) ```
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5102/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5102/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5101
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5101/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5101/comments
https://api.github.com/repos/huggingface/datasets/issues/5101/events
https://github.com/huggingface/datasets/pull/5101
1,404,513,085
PR_kwDODunzps5AkHJc
5,101
Free the "hf" filesystem protocol for `hffs`
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-11T11:57:21Z
2022-10-12T15:32:59Z
2022-10-12T15:30:38Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5101.diff", "html_url": "https://github.com/huggingface/datasets/pull/5101", "merged_at": "2022-10-12T15:30:38Z", "patch_url": "https://github.com/huggingface/datasets/pull/5101.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5101" }
null
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/5101/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5101/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5100
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5100/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5100/comments
https://api.github.com/repos/huggingface/datasets/issues/5100/events
https://github.com/huggingface/datasets/issues/5100
1,404,458,586
I_kwDODunzps5TtlZa
5,100
datasets[s3] sagemaker can't run a model - datasets issue with Value and ClassLabel and cast() method
{ "avatar_url": "https://avatars.githubusercontent.com/u/115545475?v=4", "events_url": "https://api.github.com/users/jagochi/events{/privacy}", "followers_url": "https://api.github.com/users/jagochi/followers", "following_url": "https://api.github.com/users/jagochi/following{/other_user}", "gists_url": "https://api.github.com/users/jagochi/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jagochi", "id": 115545475, "login": "jagochi", "node_id": "U_kgDOBuMVgw", "organizations_url": "https://api.github.com/users/jagochi/orgs", "received_events_url": "https://api.github.com/users/jagochi/received_events", "repos_url": "https://api.github.com/users/jagochi/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jagochi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jagochi/subscriptions", "type": "User", "url": "https://api.github.com/users/jagochi" }
[]
closed
false
null
[]
null
[]
2022-10-11T11:16:31Z
2022-10-11T13:48:26Z
2022-10-11T13:48:26Z
NONE
null
null
null
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5100/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5100/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5099
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5099/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5099/comments
https://api.github.com/repos/huggingface/datasets/issues/5099/events
https://github.com/huggingface/datasets/issues/5099
1,404,370,191
I_kwDODunzps5TtP0P
5,099
datasets doesn't support # in data paths
{ "avatar_url": "https://avatars.githubusercontent.com/u/44069155?v=4", "events_url": "https://api.github.com/users/loubnabnl/events{/privacy}", "followers_url": "https://api.github.com/users/loubnabnl/followers", "following_url": "https://api.github.com/users/loubnabnl/following{/other_user}", "gists_url": "https://api.github.com/users/loubnabnl/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/loubnabnl", "id": 44069155, "login": "loubnabnl", "node_id": "MDQ6VXNlcjQ0MDY5MTU1", "organizations_url": "https://api.github.com/users/loubnabnl/orgs", "received_events_url": "https://api.github.com/users/loubnabnl/received_events", "repos_url": "https://api.github.com/users/loubnabnl/repos", "site_admin": false, "starred_url": "https://api.github.com/users/loubnabnl/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/loubnabnl/subscriptions", "type": "User", "url": "https://api.github.com/users/loubnabnl" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" }, { "color": "7057ff", "default": true, "description": "Good for newcomers", "id": 1935892877, "name": "good first issue", "node_id": "MDU6TGFiZWwxOTM1ODkyODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue" }, { "color": "DF8D62", "default": false, "description": "", "id": 4614514401, "name": "hacktoberfest", "node_id": "LA_kwDODunzps8AAAABEwvm4Q", "url": "https://api.github.com/repos/huggingface/datasets/labels/hacktoberfest" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/9295277?v=4", "events_url": "https://api.github.com/users/riccardobucco/events{/privacy}", "followers_url": "https://api.github.com/users/riccardobucco/followers", "following_url": "https://api.github.com/users/riccardobucco/following{/other_user}", "gists_url": "https://api.github.com/users/riccardobucco/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/riccardobucco", "id": 9295277, "login": "riccardobucco", "node_id": "MDQ6VXNlcjkyOTUyNzc=", "organizations_url": "https://api.github.com/users/riccardobucco/orgs", "received_events_url": "https://api.github.com/users/riccardobucco/received_events", "repos_url": "https://api.github.com/users/riccardobucco/repos", "site_admin": false, "starred_url": "https://api.github.com/users/riccardobucco/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/riccardobucco/subscriptions", "type": "User", "url": "https://api.github.com/users/riccardobucco" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/9295277?v=4", "events_url": "https://api.github.com/users/riccardobucco/events{/privacy}", "followers_url": "https://api.github.com/users/riccardobucco/followers", "following_url": "https://api.github.com/users/riccardobucco/following{/other_user}", "gists_url": "https://api.github.com/users/riccardobucco/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/riccardobucco", "id": 9295277, "login": "riccardobucco", "node_id": "MDQ6VXNlcjkyOTUyNzc=", "organizations_url": "https://api.github.com/users/riccardobucco/orgs", "received_events_url": "https://api.github.com/users/riccardobucco/received_events", "repos_url": "https://api.github.com/users/riccardobucco/repos", "site_admin": false, "starred_url": "https://api.github.com/users/riccardobucco/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/riccardobucco/subscriptions", "type": "User", "url": "https://api.github.com/users/riccardobucco" } ]
null
[ "`datasets` doesn't seem to urlencode the directory names here\r\n\r\nhttps://github.com/huggingface/datasets/blob/7feeb5648a63b6135a8259dedc3b1e19185ee4c7/src/datasets/utils/file_utils.py#L109-L111\r\n\r\nfor example we should have\r\n```python\r\nfrom datasets.utils.file_utils import hf_hub_url\r\n\r\nurl = hf_hub_url(\"loubnabnl/bigcode_csharp\", \"data/c#/data_0003.jsonl\")\r\nprint(url)\r\n# Currently returns\r\n# https://huggingface.co/datasets/loubnabnl/bigcode_csharp/resolve/main/data/c#/data_0003.jsonl\r\n# while it should be \r\n# https://huggingface.co/datasets/loubnabnl/bigcode_csharp/resolve/main/data/c%23/data_0003.jsonl\r\n```", "I'll work on this :)", "@loubnabnl The dataset you linked in the description of the bug does not work and returns a 404. Where can I find the dataset to reproduce the bug?", "I think you can create a dataset repository on the Hub with a dummy file containing a `#`", "Ah sorry it was private I just made it public, I can also help with this if needed", "@lhoestq Should I url encode also repo_id and revision parameters? I'm not sure what are the valid characters there.\r\n\r\nPersonally, I would be cautious and only url encode the path parameter.", "These are possible solutions (assuming `from urllib.parse import quote`):\r\n\r\n1) url encode only the path parameter:\r\n```\r\n# src/datasets/utils/file_utils.py\r\ndef hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:\r\n revision = revision or config.HUB_DEFAULT_VERSION\r\n return config.HUB_DATASETS_URL.format(repo_id=repo_id, path=quote(path), revision=revision)\r\n```\r\n2) url encode all parameters:\r\n```\r\n# src/datasets/utils/file_utils.py\r\ndef hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:\r\n revision = revision or config.HUB_DEFAULT_VERSION\r\n return config.HUB_DATASETS_URL.format(repo_id=quote(repo_id), path=quote(path), revision=quote(revision))\r\n```\r\n3) url encode the whole url:\r\n```\r\n# src/datasets/config.py\r\nHUB_DATASETS_PATH = \"/datasets/{repo_id}/resolve/{revision}/{path}\"\r\nHUB_DATASETS_URL = HF_ENDPOINT + HUB_DATASETS_PATH\r\n```\r\n```\r\n# src/datasets/utils/file_utils.py\r\ndef hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:\r\n revision = revision or config.HUB_DEFAULT_VERSION\r\n return config.HF_ENDPOINT + quote(config.HUB_DATASETS_PATH.format(repo_id=repo_id, path=path, revision=revision))\r\n```", "repo_id can only contain alphanumeric characters and _- so it doesn't need to be encoded.\r\n\r\nHowever I agree it's a good idea to also apply `quote` to the revision as well as in 2. !", "Should be fixed by https://github.com/huggingface/datasets/issues/5099 - we'll do a release later today" ]
2022-10-11T10:05:32Z
2022-10-13T13:14:20Z
2022-10-13T13:14:20Z
NONE
null
null
null
## Describe the bug dataset files with `#` symbol their paths aren't read correctly. ## Steps to reproduce the bug The data in folder `c#`of this [dataset](https://huggingface.co/datasets/loubnabnl/bigcode_csharp) can't be loaded. While the folder `c_sharp` with the same data is loaded properly ```python ds = load_dataset('loubnabnl/bigcode_csharp', split="train", data_files=["data/c#/*"]) ``` ``` FileNotFoundError: Couldn't find file at https://huggingface.co/datasets/loubnabnl/bigcode_csharp/resolve/27a3166cff4bb18e11919cafa6f169c0f57483de/data/c#/data_0003.jsonl ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.5.2 - Platform: macOS-12.2.1-arm64-arm-64bit - Python version: 3.9.13 - PyArrow version: 9.0.0 - Pandas version: 1.4.3 cc @lhoestq
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5099/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5099/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5098
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5098/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5098/comments
https://api.github.com/repos/huggingface/datasets/issues/5098/events
https://github.com/huggingface/datasets/issues/5098
1,404,058,518
I_kwDODunzps5TsDuW
5,098
Classes label error when loading symbolic links using imagefolder
{ "avatar_url": "https://avatars.githubusercontent.com/u/49552732?v=4", "events_url": "https://api.github.com/users/horizon86/events{/privacy}", "followers_url": "https://api.github.com/users/horizon86/followers", "following_url": "https://api.github.com/users/horizon86/following{/other_user}", "gists_url": "https://api.github.com/users/horizon86/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/horizon86", "id": 49552732, "login": "horizon86", "node_id": "MDQ6VXNlcjQ5NTUyNzMy", "organizations_url": "https://api.github.com/users/horizon86/orgs", "received_events_url": "https://api.github.com/users/horizon86/received_events", "repos_url": "https://api.github.com/users/horizon86/repos", "site_admin": false, "starred_url": "https://api.github.com/users/horizon86/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/horizon86/subscriptions", "type": "User", "url": "https://api.github.com/users/horizon86" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" }, { "color": "7057ff", "default": true, "description": "Good for newcomers", "id": 1935892877, "name": "good first issue", "node_id": "MDU6TGFiZWwxOTM1ODkyODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue" }, { "color": "DF8D62", "default": false, "description": "", "id": 4614514401, "name": "hacktoberfest", "node_id": "LA_kwDODunzps8AAAABEwvm4Q", "url": "https://api.github.com/repos/huggingface/datasets/labels/hacktoberfest" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/9295277?v=4", "events_url": "https://api.github.com/users/riccardobucco/events{/privacy}", "followers_url": "https://api.github.com/users/riccardobucco/followers", "following_url": "https://api.github.com/users/riccardobucco/following{/other_user}", "gists_url": "https://api.github.com/users/riccardobucco/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/riccardobucco", "id": 9295277, "login": "riccardobucco", "node_id": "MDQ6VXNlcjkyOTUyNzc=", "organizations_url": "https://api.github.com/users/riccardobucco/orgs", "received_events_url": "https://api.github.com/users/riccardobucco/received_events", "repos_url": "https://api.github.com/users/riccardobucco/repos", "site_admin": false, "starred_url": "https://api.github.com/users/riccardobucco/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/riccardobucco/subscriptions", "type": "User", "url": "https://api.github.com/users/riccardobucco" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/9295277?v=4", "events_url": "https://api.github.com/users/riccardobucco/events{/privacy}", "followers_url": "https://api.github.com/users/riccardobucco/followers", "following_url": "https://api.github.com/users/riccardobucco/following{/other_user}", "gists_url": "https://api.github.com/users/riccardobucco/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/riccardobucco", "id": 9295277, "login": "riccardobucco", "node_id": "MDQ6VXNlcjkyOTUyNzc=", "organizations_url": "https://api.github.com/users/riccardobucco/orgs", "received_events_url": "https://api.github.com/users/riccardobucco/received_events", "repos_url": "https://api.github.com/users/riccardobucco/repos", "site_admin": false, "starred_url": "https://api.github.com/users/riccardobucco/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/riccardobucco/subscriptions", "type": "User", "url": "https://api.github.com/users/riccardobucco" } ]
null
[ "It can be solved temporarily by remove `resolve` in \r\nhttps://github.com/huggingface/datasets/blob/bef23be3d9543b1ca2da87ab2f05070201044ddc/src/datasets/data_files.py#L278", "Hi, thanks for reporting and suggesting a fix! We still need to account for `.`/`..` in the file path, so a more robust fix would be `Path(os.path.abspath(filepath))`.", "> Hi, thanks for reporting and suggesting a fix! We still need to account for `.`/`..` in the file path, so a more robust fix would be `Path(os.path.abspath(filepath))`.\r\n\r\nThanks for your reply!" ]
2022-10-11T06:10:58Z
2022-11-14T14:40:20Z
2022-11-14T14:40:20Z
NONE
null
null
null
**Is your feature request related to a problem? Please describe.** Like this: #4015 When there are **symbolic links** to pictures in the data folder, the parent folder name of the **real file** will be used as the class name instead of the parent folder of the symbolic link itself. Can you give an option to decide whether to enable symbolic link tracking? This is inconsistent with the `torchvision.datasets.ImageFolder` behavior. For example: ![image](https://user-images.githubusercontent.com/49552732/195008591-3cce644e-aabe-4f39-90b9-832861cadb3d.png) ![image](https://user-images.githubusercontent.com/49552732/195008841-0b0c2289-eb7f-411a-977b-37426f23a277.png) It use `others` in green circle as class label but not `abnormal`, I wish `load_dataset` not use the real file parent as label. **Describe the solution you'd like** A clear and concise description of what you want to happen. **Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. **Additional context** Add any other context about the feature request here.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5098/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5098/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5097
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5097/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5097/comments
https://api.github.com/repos/huggingface/datasets/issues/5097/events
https://github.com/huggingface/datasets/issues/5097
1,403,679,353
I_kwDODunzps5TqnJ5
5,097
Fatal error with pyarrow/libarrow.so
{ "avatar_url": "https://avatars.githubusercontent.com/u/11340846?v=4", "events_url": "https://api.github.com/users/catalys1/events{/privacy}", "followers_url": "https://api.github.com/users/catalys1/followers", "following_url": "https://api.github.com/users/catalys1/following{/other_user}", "gists_url": "https://api.github.com/users/catalys1/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/catalys1", "id": 11340846, "login": "catalys1", "node_id": "MDQ6VXNlcjExMzQwODQ2", "organizations_url": "https://api.github.com/users/catalys1/orgs", "received_events_url": "https://api.github.com/users/catalys1/received_events", "repos_url": "https://api.github.com/users/catalys1/repos", "site_admin": false, "starred_url": "https://api.github.com/users/catalys1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/catalys1/subscriptions", "type": "User", "url": "https://api.github.com/users/catalys1" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
[ "Thanks for reporting, @catalys1.\r\n\r\nThis seems a duplicate of:\r\n- #3310 \r\n\r\nThe source of the problem is in PyArrow:\r\n- [ARROW-15141: [C++] Fatal error condition occurred in aws_thread_launch](https://issues.apache.org/jira/browse/ARROW-15141)\r\n- [ARROW-17501: [C++] Fatal error condition occurred in aws_thread_launch](https://issues.apache.org/jira/browse/ARROW-17501)\r\n\r\nThe bug in their dependency is still unresolved:\r\n- https://github.com/aws/aws-sdk-cpp/issues/1809\r\n\r\nApparently, the `aws-sdk-cpp` PyArrow dependency needs to be pinned at version `1.8.186` if using conda. Have you updated it after installing PyArrow?\r\n```shell\r\nconda list aws-sdk-cpp\r\n```\r\n\r\nMaybe you should try to downgrade it to that version:\r\n```shell\r\nconda install -c conda-forge aws-sdk-cpp=1.8.186\r\n```" ]
2022-10-10T20:29:04Z
2022-10-11T06:56:01Z
2022-10-11T06:56:00Z
NONE
null
null
null
## Describe the bug When using datasets, at the very end of my jobs the program crashes (see trace below). It doesn't seem to affect anything, as it appears to happen as the program is closing down. Just importing `datasets` is enough to cause the error. ## Steps to reproduce the bug This is sufficient to reproduce the problem: ```bash python -c "import datasets" ``` ## Expected results Program should run to completion without an error. ## Actual results ```bash Fatal error condition occurred in /opt/vcpkg/buildtrees/aws-c-io/src/9e6648842a-364b708815.clean/source/event_loop.c:72: aws_thread_launch(&cleanup_thread, s_event_loop_destroy_async_thread_fn, el_group, &thread_options) == AWS_OP_SUCCESS Exiting Application ################################################################################ Stack trace: ################################################################################ /u/user/miniconda3/envs/env/lib/python3.10/site-packages/pyarrow/libarrow.so.900(+0x200af06) [0x150dff547f06] /u/user/miniconda3/envs/env/lib/python3.10/site-packages/pyarrow/libarrow.so.900(+0x20028e5) [0x150dff53f8e5] /u/user/miniconda3/envs/env/lib/python3.10/site-packages/pyarrow/libarrow.so.900(+0x1f27e09) [0x150dff464e09] /u/user/miniconda3/envs/env/lib/python3.10/site-packages/pyarrow/libarrow.so.900(+0x200ba3d) [0x150dff548a3d] /u/user/miniconda3/envs/env/lib/python3.10/site-packages/pyarrow/libarrow.so.900(+0x1f25948) [0x150dff462948] /u/user/miniconda3/envs/env/lib/python3.10/site-packages/pyarrow/libarrow.so.900(+0x200ba3d) [0x150dff548a3d] /u/user/miniconda3/envs/env/lib/python3.10/site-packages/pyarrow/libarrow.so.900(+0x1ee0b46) [0x150dff41db46] /u/user/miniconda3/envs/env/lib/python3.10/site-packages/pyarrow/libarrow.so.900(+0x194546a) [0x150dfee8246a] /lib64/libc.so.6(+0x39b0c) [0x150e15eadb0c] /lib64/libc.so.6(on_exit+0) [0x150e15eadc40] /u/user/miniconda3/envs/env/bin/python(+0x28db18) [0x560ae370eb18] /u/user/miniconda3/envs/env/bin/python(+0x28db4b) [0x560ae370eb4b] /u/user/miniconda3/envs/env/bin/python(+0x28db90) [0x560ae370eb90] /u/user/miniconda3/envs/env/bin/python(_PyRun_SimpleFileObject+0x1e6) [0x560ae37123e6] /u/user/miniconda3/envs/env/bin/python(_PyRun_AnyFileObject+0x44) [0x560ae37124c4] /u/user/miniconda3/envs/env/bin/python(Py_RunMain+0x35d) [0x560ae37135bd] /u/user/miniconda3/envs/env/bin/python(Py_BytesMain+0x39) [0x560ae37137d9] /lib64/libc.so.6(__libc_start_main+0xf3) [0x150e15e97493] /u/user/miniconda3/envs/env/bin/python(+0x2125d4) [0x560ae36935d4] Aborted (core dumped) ``` ## Environment info - `datasets` version: 2.5.1 - Platform: Linux-4.18.0-348.23.1.el8_5.x86_64-x86_64-with-glibc2.28 - Python version: 3.10.4 - PyArrow version: 9.0.0 - Pandas version: 1.4.3
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5097/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5097/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5096
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5096/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5096/comments
https://api.github.com/repos/huggingface/datasets/issues/5096/events
https://github.com/huggingface/datasets/issues/5096
1,403,379,816
I_kwDODunzps5TpeBo
5,096
Transfer some canonical datasets under an organization namespace
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "color": "0e8a16", "default": false, "description": "Contribution to a dataset script", "id": 4564477500, "name": "dataset contribution", "node_id": "LA_kwDODunzps8AAAABEBBmPA", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20contribution" } ]
open
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
[ "The transfer of the dummy dataset to the dummy org works as expected:\r\n```python\r\nIn [1]: from datasets import load_dataset; ds = load_dataset(\"dummy_canonical_dataset\", download_mode=\"force_redownload\"); ds\r\nDownloading builder script: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2.98k/2.98k [00:00<00:00, 2.01MB/s]\r\nDownloading and preparing dataset dummy_canonical_dataset/default (download: 411 bytes, generated: 385 bytes, post-processed: Unknown size, total: 796 bytes) to .../.cache/huggingface/datasets/dummy_canonical_dataset/default/1.0.0/100870c358637e269fee140585e61e1472d5075a9bf6f866719934c725e55fb4...\r\nDownloading data: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 411/411 [00:00<00:00, 293kB/s]\r\nDataset dummy_canonical_dataset downloaded and prepared to .../.cache/huggingface/datasets/dummy_canonical_dataset/default/1.0.0/100870c358637e269fee140585e61e1472d5075a9bf6f866719934c725e55fb4. Subsequent calls will reuse this data.\r\n100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 304.16it/s]\r\nOut[1]: \r\nDatasetDict({\r\n train: Dataset({\r\n features: ['langs', 'ner_tags', 'tokens'],\r\n num_rows: 3\r\n })\r\n})\r\n\r\nIn [2]: from datasets import load_dataset; ds = load_dataset(\"dummy-canonical-org/dummy_canonical_dataset\"); ds\r\nDownloading builder script: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2.98k/2.98k [00:00<00:00, 1.57MB/s]\r\nDownloading and preparing dataset dummy_canonical_dataset/default to .../.cache/huggingface/datasets/dummy-canonical-org___dummy_canonical_dataset/default/1.0.0/100870c358637e269fee140585e61e1472d5075a9bf6f866719934c725e55fb4...\r\nDataset dummy_canonical_dataset downloaded and prepared to .../.cache/huggingface/datasets/dummy-canonical-org___dummy_canonical_dataset/default/1.0.0/100870c358637e269fee140585e61e1472d5075a9bf6f866719934c725e55fb4. Subsequent calls will reuse this data.\r\n100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 362.48it/s]\r\nOut[2]: \r\nDatasetDict({\r\n train: Dataset({\r\n features: ['langs', 'ner_tags', 'tokens'],\r\n num_rows: 3\r\n })\r\n})\r\n```", "Cool ! 🚀 " ]
2022-10-10T15:44:31Z
2022-12-02T20:41:07Z
null
MEMBER
null
null
null
As discussed during our @huggingface/datasets meeting, we are planning to move some "canonical" dataset scripts under their corresponding organization namespace (if this does not exist). On the contrary, if the dataset already exists under the organization namespace, we are deprecating the canonical one (and eventually delete it). First, we should test it using a dummy dataset/organization. TODO: - [x] Test with a dummy dataset - [x] Create dummy canonical dataset: https://huggingface.co/datasets/dummy_canonical_dataset - [x] Create dummy organization: https://huggingface.co/dummy-canonical-org - [x] Transfer dummy canonical dataset to dummy organization - [ ] Transfer datasets - [x] qasper => allenai - [ ] multilingual_librispeech => facebook - It already exists "facebook/multilingual_librispeech" - [ ] oscar => oscar-corpus - [ ] gem => GEM - [ ] wmt14, wmt15, wmt16, wmt17, wmt18, wmt19,... => wmt - ...
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 2, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/5096/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5096/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5095
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5095/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5095/comments
https://api.github.com/repos/huggingface/datasets/issues/5095/events
https://github.com/huggingface/datasets/pull/5095
1,403,221,408
PR_kwDODunzps5Afzsq
5,095
Fix tutorial (#5093)
{ "avatar_url": "https://avatars.githubusercontent.com/u/9295277?v=4", "events_url": "https://api.github.com/users/riccardobucco/events{/privacy}", "followers_url": "https://api.github.com/users/riccardobucco/followers", "following_url": "https://api.github.com/users/riccardobucco/following{/other_user}", "gists_url": "https://api.github.com/users/riccardobucco/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/riccardobucco", "id": 9295277, "login": "riccardobucco", "node_id": "MDQ6VXNlcjkyOTUyNzc=", "organizations_url": "https://api.github.com/users/riccardobucco/orgs", "received_events_url": "https://api.github.com/users/riccardobucco/received_events", "repos_url": "https://api.github.com/users/riccardobucco/repos", "site_admin": false, "starred_url": "https://api.github.com/users/riccardobucco/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/riccardobucco/subscriptions", "type": "User", "url": "https://api.github.com/users/riccardobucco" }
[]
closed
false
null
[]
null
[ "Oops I merged without linking to the hacktoberfest issue - not sure if it counts in this case\r\n\r\nsorry about that..\r\n\r\nNext time you can just mention \"Close #XXXX\" in your issue to link it", "It should :) (the `hacktoberfest` repo topic is all that matters)" ]
2022-10-10T13:55:15Z
2022-10-10T17:50:52Z
2022-10-10T15:32:20Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5095.diff", "html_url": "https://github.com/huggingface/datasets/pull/5095", "merged_at": "2022-10-10T15:32:20Z", "patch_url": "https://github.com/huggingface/datasets/pull/5095.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5095" }
Close #5093
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5095/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5095/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5094
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5094/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5094/comments
https://api.github.com/repos/huggingface/datasets/issues/5094/events
https://github.com/huggingface/datasets/issues/5094
1,403,214,950
I_kwDODunzps5To1xm
5,094
Multiprocessing with `Dataset.map` and `PyTorch` results in deadlock
{ "avatar_url": "https://avatars.githubusercontent.com/u/36822895?v=4", "events_url": "https://api.github.com/users/RR-28023/events{/privacy}", "followers_url": "https://api.github.com/users/RR-28023/followers", "following_url": "https://api.github.com/users/RR-28023/following{/other_user}", "gists_url": "https://api.github.com/users/RR-28023/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/RR-28023", "id": 36822895, "login": "RR-28023", "node_id": "MDQ6VXNlcjM2ODIyODk1", "organizations_url": "https://api.github.com/users/RR-28023/orgs", "received_events_url": "https://api.github.com/users/RR-28023/received_events", "repos_url": "https://api.github.com/users/RR-28023/repos", "site_admin": false, "starred_url": "https://api.github.com/users/RR-28023/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/RR-28023/subscriptions", "type": "User", "url": "https://api.github.com/users/RR-28023" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
null
[ "Hi ! Could it be an Out of Memory issue that could have killed one of the processes ? can you check your memory ?", "Hi! I don't think it is a memory issue. I'm monitoring the main and spawn python processes and threads with `htop` and the memory does not peak. Besides, the example I've posted above should not be that demanding in terms of memory, right? (I have 32GB of RAM). ", "Indeed it should be fine. I couldn't reproduce the error though - I ran your script on my side and it works fine. What version of pytorch are you using ?", "Interesting.. I'm using `torch 1.12.1`", "I also tried on colab and it works fine 🤔 \r\nMaybe something is wrong with your installation of pytorch ?", "Oh actually I just saw that you're using python 3.9\r\n\r\nThis could be related to https://github.com/huggingface/datasets/issues/4113\r\n\r\nWe'll fix that as soon as we can, in the meantime you can try to use use single process, or use an older version of python maybe ?", "I tried with python 3.7 and the issue persists. In collab, which also uses 3.7 I don't get the issue, so yes I guess is something on mu side... will post it here if I manage to fix it", "Hi! Which version of transformers are you using? I test the code on Colab (so python 3.7) with transformers 4.23.1, torch 1.12.1 and pyarrow 9.0.0 (also 6.x), it worked without stuck." ]
2022-10-10T13:50:56Z
2022-10-18T16:18:53Z
null
NONE
null
null
null
## Describe the bug There seems to be an issue with using multiprocessing with `datasets.Dataset.map` (i.e. setting `num_proc` to a value greater than one) combined with a function that uses `torch` under the hood. The subprocesses that `datasets.Dataset.map` spawns [a this step](https://github.com/huggingface/datasets/blob/1b935dab9d2f171a8c6294269421fe967eb55e34/src/datasets/arrow_dataset.py#L2663) go into wait mode forever. ## Steps to reproduce the bug The below code goes into deadlock when `NUMBER_OF_PROCESSES` is greater than one. ```python NUMBER_OF_PROCESSES = 2 from transformers import AutoTokenizer, AutoModel from datasets import load_dataset dataset = load_dataset("glue", "mrpc", split="train") tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-MiniLM-L6-v2") model = AutoModel.from_pretrained("sentence-transformers/all-MiniLM-L6-v2") model.to("cpu") def cls_pooling(model_output): return model_output.last_hidden_state[:, 0] def generate_embeddings_batched(examples): sentences_batch = list(examples['sentence1']) encoded_input = tokenizer( sentences_batch, padding=True, truncation=True, return_tensors="pt" ) encoded_input = {k: v.to("cpu") for k, v in encoded_input.items()} model_output = model(**encoded_input) embeddings = cls_pooling(model_output) examples['embeddings'] = embeddings.detach().cpu().numpy() # 64, 384 return examples embeddings_dataset = dataset.map( generate_embeddings_batched, batched=True, batch_size=10, num_proc=NUMBER_OF_PROCESSES ) ``` While debugging it I've seen that it gets "stuck" when calling `torch.nn.Embedding.forward` but some testing shows that the same happens with other functions from `torch.nn`. ## Environment info - Platform: Linux-5.14.0-1052-oem-x86_64-with-glibc2.31 - Python version: 3.9.14 - PyArrow version: 9.0.0 - Pandas version: 1.5.0 Not sure if this is a HF problem, a PyTorch problem or something I'm doing wrong.. Thanks!
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5094/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5094/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5093
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5093/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5093/comments
https://api.github.com/repos/huggingface/datasets/issues/5093/events
https://github.com/huggingface/datasets/issues/5093
1,402,939,660
I_kwDODunzps5TnykM
5,093
Mismatch between tutoriel and doc
{ "avatar_url": "https://avatars.githubusercontent.com/u/22726840?v=4", "events_url": "https://api.github.com/users/clefourrier/events{/privacy}", "followers_url": "https://api.github.com/users/clefourrier/followers", "following_url": "https://api.github.com/users/clefourrier/following{/other_user}", "gists_url": "https://api.github.com/users/clefourrier/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/clefourrier", "id": 22726840, "login": "clefourrier", "node_id": "MDQ6VXNlcjIyNzI2ODQw", "organizations_url": "https://api.github.com/users/clefourrier/orgs", "received_events_url": "https://api.github.com/users/clefourrier/received_events", "repos_url": "https://api.github.com/users/clefourrier/repos", "site_admin": false, "starred_url": "https://api.github.com/users/clefourrier/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/clefourrier/subscriptions", "type": "User", "url": "https://api.github.com/users/clefourrier" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" }, { "color": "7057ff", "default": true, "description": "Good for newcomers", "id": 1935892877, "name": "good first issue", "node_id": "MDU6TGFiZWwxOTM1ODkyODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue" }, { "color": "DF8D62", "default": false, "description": "", "id": 4614514401, "name": "hacktoberfest", "node_id": "LA_kwDODunzps8AAAABEwvm4Q", "url": "https://api.github.com/repos/huggingface/datasets/labels/hacktoberfest" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/9295277?v=4", "events_url": "https://api.github.com/users/riccardobucco/events{/privacy}", "followers_url": "https://api.github.com/users/riccardobucco/followers", "following_url": "https://api.github.com/users/riccardobucco/following{/other_user}", "gists_url": "https://api.github.com/users/riccardobucco/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/riccardobucco", "id": 9295277, "login": "riccardobucco", "node_id": "MDQ6VXNlcjkyOTUyNzc=", "organizations_url": "https://api.github.com/users/riccardobucco/orgs", "received_events_url": "https://api.github.com/users/riccardobucco/received_events", "repos_url": "https://api.github.com/users/riccardobucco/repos", "site_admin": false, "starred_url": "https://api.github.com/users/riccardobucco/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/riccardobucco/subscriptions", "type": "User", "url": "https://api.github.com/users/riccardobucco" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/9295277?v=4", "events_url": "https://api.github.com/users/riccardobucco/events{/privacy}", "followers_url": "https://api.github.com/users/riccardobucco/followers", "following_url": "https://api.github.com/users/riccardobucco/following{/other_user}", "gists_url": "https://api.github.com/users/riccardobucco/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/riccardobucco", "id": 9295277, "login": "riccardobucco", "node_id": "MDQ6VXNlcjkyOTUyNzc=", "organizations_url": "https://api.github.com/users/riccardobucco/orgs", "received_events_url": "https://api.github.com/users/riccardobucco/received_events", "repos_url": "https://api.github.com/users/riccardobucco/repos", "site_admin": false, "starred_url": "https://api.github.com/users/riccardobucco/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/riccardobucco/subscriptions", "type": "User", "url": "https://api.github.com/users/riccardobucco" } ]
null
[ "Hi, thanks for reporting! This line should be replaced with \r\n```python\r\ndataset = dataset.map(lambda examples: tokenizer(examples[\"text\"], return_tensors=\"np\"), batched=True)\r\n```\r\nfor it to work (the `return_tensors` part inside the `tokenizer` call).", "Can I work on this?", "Fixed in https://github.com/huggingface/datasets/pull/5095" ]
2022-10-10T10:23:53Z
2022-10-10T17:51:15Z
2022-10-10T17:51:14Z
CONTRIBUTOR
null
null
null
## Describe the bug In the "Process text data" tutorial, [`map` has `return_tensors` as kwarg](https://huggingface.co/docs/datasets/main/en/nlp_process#map). It does not seem to appear in the [function documentation](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.map), nor to work. ## Steps to reproduce the bug MWE: ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") from datasets import load_dataset dataset = load_dataset("lhoestq/demo1", split="train") dataset = dataset.map(lambda examples: tokenizer(examples["review"]), batched=True, return_tensors="pt") ``` ## Expected results return_tensors to be a valid kwarg :smiley: ## Actual results ```python >> TypeError: map() got an unexpected keyword argument 'return_tensors' ``` ## Environment info - `datasets` version: 2.3.2 - Platform: Linux-5.14.0-1052-oem-x86_64-with-glibc2.29 - Python version: 3.8.10 - PyArrow version: 8.0.0 - Pandas version: 1.4.3
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5093/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5093/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5092
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5092/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5092/comments
https://api.github.com/repos/huggingface/datasets/issues/5092/events
https://github.com/huggingface/datasets/pull/5092
1,402,713,517
PR_kwDODunzps5AeIsS
5,092
Use HTML relative paths for tiles in the docs
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "> Good catch, @lewtun. Thanks for the fix.\r\n> \r\n> Do you know if there are other absolute paths in the docs that should be fixed as well?\r\n\r\nI found a few more in [0d4796b](https://github.com/huggingface/datasets/pull/5092/commits/0d4796b747e6620d9fcc17a8f74acc5cf4bba7be).\r\n\r\nHowever, I noticed that none of the cross-references (e.g. to API classes / methods) work locally, but that is probably just a limitation of the local build", "Thanks." ]
2022-10-10T07:24:27Z
2022-10-11T13:25:45Z
2022-10-11T13:23:23Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5092.diff", "html_url": "https://github.com/huggingface/datasets/pull/5092", "merged_at": "2022-10-11T13:23:23Z", "patch_url": "https://github.com/huggingface/datasets/pull/5092.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5092" }
This PR replaces the absolute paths in the landing page tiles with relative ones so that one can test navigation both locally in and in future PRs (see [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_5084/en/index) for an example PR where the links don't work). I encountered this while working on the `optimum` docs and figured I'd fix it elsewhere too :) Internal Slack thread: https://huggingface.slack.com/archives/C02GLJ5S0E9/p1665129710176619
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5092/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5092/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5091
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5091/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5091/comments
https://api.github.com/repos/huggingface/datasets/issues/5091/events
https://github.com/huggingface/datasets/pull/5091
1,401,112,552
PR_kwDODunzps5AZCm9
5,091
Allow connection objects in `from_sql` + small doc improvement
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-07T12:39:44Z
2022-10-09T13:19:15Z
2022-10-09T13:16:57Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5091.diff", "html_url": "https://github.com/huggingface/datasets/pull/5091", "merged_at": "2022-10-09T13:16:57Z", "patch_url": "https://github.com/huggingface/datasets/pull/5091.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5091" }
Allow connection objects in `from_sql` (emit a warning that they are cachable) and add a tip that explains the format of the con parameter when provided as a URI string. PS: ~~This PR contains a parameter link, so https://github.com/huggingface/doc-builder/pull/311 needs to be merged before it's "ready for review".~~ Done!
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5091/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5091/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5090
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5090/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5090/comments
https://api.github.com/repos/huggingface/datasets/issues/5090/events
https://github.com/huggingface/datasets/issues/5090
1,401,102,407
I_kwDODunzps5TgyBH
5,090
Review sync issues from GitHub to Hub
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
[ "Nice!!" ]
2022-10-07T12:31:56Z
2022-10-08T07:07:36Z
2022-10-08T07:07:36Z
MEMBER
null
null
null
## Describe the bug We have discovered that sometimes there were sync issues between GitHub and Hub datasets, after a merge commit to main branch. For example: - this merge commit: https://github.com/huggingface/datasets/commit/d74a9e8e4bfff1fed03a4cab99180a841d7caf4b - was not properly synced with the Hub: https://github.com/huggingface/datasets/actions/runs/3002495269/jobs/4819769684 ``` [main 9e641de] Add Papers with Code ID to scifact dataset (#4941) Author: Albert Villanova del Moral <albertvillanova@users.noreply.huggingface.co> 1 file changed, 42 insertions(+), 14 deletions(-) push failed ! GitCommandError(['git', 'push'], 1, b'remote: ---------------------------------------------------------- \nremote: Sorry, your push was rejected during YAML metadata verification: \nremote: - Error: "license" does not match any of the allowed types \nremote: ---------------------------------------------------------- \nremote: Please find the documentation at: \nremote: https://huggingface.co/docs/hub/models-cards#model-card-metadata \nremote: ---------------------------------------------------------- \nTo [https://huggingface.co/datasets/scifact.git\n](https://huggingface.co/datasets/scifact.git/n) ! [remote rejected] main -> main (pre-receive hook declined)\nerror: failed to push some refs to \'[https://huggingface.co/datasets/scifact.git\](https://huggingface.co/datasets/scifact.git/)'', b'') ``` We are reviewing sync issues in previous commits to recover them and repushing to the Hub. TODO: Review - [x] #4941 - scifact - [x] #4931 - scifact - [x] #4753 - wikipedia - [x] #4554 - wmt17, wmt19, wmt_t2t - Fixed with "Release 2.4.0" commit: https://github.com/huggingface/datasets/commit/401d4c4f9b9594cb6527c599c0e7a72ce1a0ea49 - https://huggingface.co/datasets/wmt17/commit/5c0afa83fbbd3508ff7627c07f1b27756d1379ea - https://huggingface.co/datasets/wmt19/commit/b8ad5bf1960208a376a0ab20bc8eac9638f7b400 - https://huggingface.co/datasets/wmt_t2t/commit/b6d67191804dd0933476fede36754a436b48d1fc - [x] #4607 - [x] #4416 - lccc - Fixed with "Release 2.3.0" commit: https://huggingface.co/datasets/lccc/commit/8b1f8cf425b5653a0a4357a53205aac82ce038d1 - [x] #4367
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5090/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5090/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5089
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5089/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5089/comments
https://api.github.com/repos/huggingface/datasets/issues/5089/events
https://github.com/huggingface/datasets/issues/5089
1,400,788,486
I_kwDODunzps5TflYG
5,089
Resume failed process
{ "avatar_url": "https://avatars.githubusercontent.com/u/208336?v=4", "events_url": "https://api.github.com/users/felix-schneider/events{/privacy}", "followers_url": "https://api.github.com/users/felix-schneider/followers", "following_url": "https://api.github.com/users/felix-schneider/following{/other_user}", "gists_url": "https://api.github.com/users/felix-schneider/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/felix-schneider", "id": 208336, "login": "felix-schneider", "node_id": "MDQ6VXNlcjIwODMzNg==", "organizations_url": "https://api.github.com/users/felix-schneider/orgs", "received_events_url": "https://api.github.com/users/felix-schneider/received_events", "repos_url": "https://api.github.com/users/felix-schneider/repos", "site_admin": false, "starred_url": "https://api.github.com/users/felix-schneider/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/felix-schneider/subscriptions", "type": "User", "url": "https://api.github.com/users/felix-schneider" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
[]
2022-10-07T08:07:03Z
2022-10-07T08:07:03Z
null
NONE
null
null
null
**Is your feature request related to a problem? Please describe.** When a process (`map`, `filter`, etc.) crashes part-way through, you lose all progress. **Describe the solution you'd like** It would be good if the cache reflected the partial progress, so that after we restart the script, the process can restart where it left off. **Describe alternatives you've considered** Doing processing outside of `datasets`, by writing the dataset to json files and building a restart mechanism myself. **Additional context** N/A
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5089/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5089/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5088
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5088/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5088/comments
https://api.github.com/repos/huggingface/datasets/issues/5088/events
https://github.com/huggingface/datasets/issues/5088
1,400,530,412
I_kwDODunzps5TemXs
5,088
load_datasets("json", ...) don't read local .json.gz properly
{ "avatar_url": "https://avatars.githubusercontent.com/u/112650299?v=4", "events_url": "https://api.github.com/users/junwang-wish/events{/privacy}", "followers_url": "https://api.github.com/users/junwang-wish/followers", "following_url": "https://api.github.com/users/junwang-wish/following{/other_user}", "gists_url": "https://api.github.com/users/junwang-wish/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/junwang-wish", "id": 112650299, "login": "junwang-wish", "node_id": "U_kgDOBrboOw", "organizations_url": "https://api.github.com/users/junwang-wish/orgs", "received_events_url": "https://api.github.com/users/junwang-wish/received_events", "repos_url": "https://api.github.com/users/junwang-wish/repos", "site_admin": false, "starred_url": "https://api.github.com/users/junwang-wish/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/junwang-wish/subscriptions", "type": "User", "url": "https://api.github.com/users/junwang-wish" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
null
[ "Hi @junwang-wish, thanks for reporting.\r\n\r\nUnfortunately, I'm not able to reproduce the bug. Which version of `datasets` are you using? Does the problem persist if you update `datasets`?\r\n```shell\r\npip install -U datasets\r\n``` ", "Thanks @albertvillanova I updated `datasets` from `2.5.1` to `2.5.2` and tested copying the `json.gz` to a different directory and my mind was blown:\r\n\r\n```python\r\nfpath = '/data/junwang/.cache/general/57b6f2314cbe0bc45dda5b78f0871df2/test.json.gz'\r\nds_panda = DatasetDict(\r\n test=Dataset.from_pandas(\r\n pd.read_json(fpath, lines=True)\r\n )\r\n)\r\nds_direct = load_dataset(\r\n 'json', data_files={\r\n 'test': fpath\r\n }, features=Features(\r\n text_input=Value(dtype=\"string\", id=None),\r\n text_output=Value(dtype=\"string\", id=None)\r\n )\r\n)\r\nlen(ds_panda['test']), len(ds_direct['test'])\r\n```\r\nproduces \r\n```python\r\nUsing custom data configuration default-0e6cf24134163e8b\r\nFound cached dataset json (/data/junwang/.cache/huggingface/datasets/json/default-0e6cf24134163e8b/0.0.0/e6070c77f18f01a5ad4551a8b7edfba20b8438b7cad4d94e6ad9378022ce4aab)\r\n(1, 0)\r\n```\r\nbut then I ran below command to see if the same file in a different directory leads to same discrepancy\r\n```shell\r\ncp /data/junwang/.cache/general/57b6f2314cbe0bc45dda5b78f0871df2/test.json.gz tmp_test.json.gz\r\n```\r\nand so I ran\r\n```python\r\nfpath = 'tmp_test.json.gz'\r\nds_panda = DatasetDict(\r\n test=Dataset.from_pandas(\r\n pd.read_json(fpath, lines=True)\r\n )\r\n)\r\nds_direct = load_dataset(\r\n 'json', data_files={\r\n 'test': fpath\r\n }, features=Features(\r\n text_input=Value(dtype=\"string\", id=None),\r\n text_output=Value(dtype=\"string\", id=None)\r\n )\r\n)\r\nlen(ds_panda['test']), len(ds_direct['test'])\r\n```\r\nand behold, I get \r\n```python\r\nUsing custom data configuration default-f679b32ab0008520\r\nDownloading and preparing dataset json/default to /data/junwang/.cache/huggingface/datasets/json/default-f679b32ab0008520/0.0.0/e6070c77f18f01a5ad4551a8b7edfba20b8438b7cad4d94e6ad9378022ce4aab...\r\nDataset json downloaded and prepared to /data/junwang/.cache/huggingface/datasets/json/default-f679b32ab0008520/0.0.0/e6070c77f18f01a5ad4551a8b7edfba20b8438b7cad4d94e6ad9378022ce4aab. Subsequent calls will reuse this data.\r\n(1, 1)\r\n```\r\nThey match now !\r\n\r\nThis problem happens regardless of the shell I use (VScode jupyter extension or plain old Python REPL). \r\n\r\nI attached the `json.gz` here for reference: [test.json.gz](https://github.com/huggingface/datasets/files/9734843/test.json.gz)\r\n\r\n" ]
2022-10-07T02:16:58Z
2022-10-07T14:43:16Z
null
NONE
null
null
null
## Describe the bug I have a local file `*.json.gz` and it can be read by `pandas.read_json(lines=True)`, but cannot be read by `load_datasets("json")` (resulting in 0 lines) ## Steps to reproduce the bug ```python fpath = '/data/junwang/.cache/general/57b6f2314cbe0bc45dda5b78f0871df2/test.json.gz' ds_panda = DatasetDict( test=Dataset.from_pandas( pd.read_json(fpath, lines=True) ) ) ds_direct = load_dataset( 'json', data_files={ 'test': fpath }, features=Features( text_input=Value(dtype="string", id=None), text_output=Value(dtype="string", id=None) ) ) len(ds_panda['test']), len(ds_direct['test']) ``` ## Expected results Lines of `ds_panda['test']` and `ds_direct['test']` should match. ## Actual results ``` Using custom data configuration default-c0ef2598760968aa Downloading and preparing dataset json/default to /data/junwang/.cache/huggingface/datasets/json/default-c0ef2598760968aa/0.0.0/e6070c77f18f01a5ad4551a8b7edfba20b8438b7cad4d94e6ad9378022ce4aab... Dataset json downloaded and prepared to /data/junwang/.cache/huggingface/datasets/json/default-c0ef2598760968aa/0.0.0/e6070c77f18f01a5ad4551a8b7edfba20b8438b7cad4d94e6ad9378022ce4aab. Subsequent calls will reuse this data. (62087, 0) ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: - Platform: Ubuntu 18.04.4 LTS - Python version: 3.8.13 - PyArrow version: 9.0.0
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5088/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5088/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5087
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5087/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5087/comments
https://api.github.com/repos/huggingface/datasets/issues/5087/events
https://github.com/huggingface/datasets/pull/5087
1,400,487,967
PR_kwDODunzps5AW-N9
5,087
Fix filter with empty indices
{ "avatar_url": "https://avatars.githubusercontent.com/u/23029765?v=4", "events_url": "https://api.github.com/users/Mouhanedg56/events{/privacy}", "followers_url": "https://api.github.com/users/Mouhanedg56/followers", "following_url": "https://api.github.com/users/Mouhanedg56/following{/other_user}", "gists_url": "https://api.github.com/users/Mouhanedg56/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Mouhanedg56", "id": 23029765, "login": "Mouhanedg56", "node_id": "MDQ6VXNlcjIzMDI5NzY1", "organizations_url": "https://api.github.com/users/Mouhanedg56/orgs", "received_events_url": "https://api.github.com/users/Mouhanedg56/received_events", "repos_url": "https://api.github.com/users/Mouhanedg56/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Mouhanedg56/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Mouhanedg56/subscriptions", "type": "User", "url": "https://api.github.com/users/Mouhanedg56" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-07T01:07:00Z
2022-10-07T18:43:03Z
2022-10-07T18:40:26Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5087.diff", "html_url": "https://github.com/huggingface/datasets/pull/5087", "merged_at": "2022-10-07T18:40:26Z", "patch_url": "https://github.com/huggingface/datasets/pull/5087.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5087" }
Fix #5085
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5087/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5087/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5086
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5086/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5086/comments
https://api.github.com/repos/huggingface/datasets/issues/5086/events
https://github.com/huggingface/datasets/issues/5086
1,400,216,975
I_kwDODunzps5TdZ2P
5,086
HTTPError: 404 Client Error: Not Found for url
{ "avatar_url": "https://avatars.githubusercontent.com/u/54015474?v=4", "events_url": "https://api.github.com/users/km5ar/events{/privacy}", "followers_url": "https://api.github.com/users/km5ar/followers", "following_url": "https://api.github.com/users/km5ar/following{/other_user}", "gists_url": "https://api.github.com/users/km5ar/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/km5ar", "id": 54015474, "login": "km5ar", "node_id": "MDQ6VXNlcjU0MDE1NDc0", "organizations_url": "https://api.github.com/users/km5ar/orgs", "received_events_url": "https://api.github.com/users/km5ar/received_events", "repos_url": "https://api.github.com/users/km5ar/repos", "site_admin": false, "starred_url": "https://api.github.com/users/km5ar/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/km5ar/subscriptions", "type": "User", "url": "https://api.github.com/users/km5ar" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
[ "FYI @lewtun ", "Hi @km5ar, thanks for reporting.\r\n\r\nThis should be fixed in the notebook:\r\n- the filename `datasets-issues-with-hf-doc-builder.jsonl` no longer exists on the repo; instead, current filename is `datasets-issues-with-comments.jsonl`\r\n- see: https://huggingface.co/datasets/lewtun/github-issues/tree/main\r\n\r\nAnyway, depending on your version of `datasets`, you can now use:\r\n```python\r\nfrom datasets import load_dataset\r\n\r\nissues_dataset = load_dataset(\"lewtun/github-issues\")\r\nissues_dataset\r\n```\r\ninstead of:\r\n```python\r\nfrom huggingface_hub import hf_hub_url\r\n\r\ndata_files = hf_hub_url(\r\n repo_id=\"lewtun/github-issues\",\r\n filename=\"datasets-issues-with-hf-doc-builder.jsonl\",\r\n repo_type=\"dataset\",\r\n)\r\nfrom datasets import load_dataset\r\n\r\nissues_dataset = load_dataset(\"json\", data_files=data_files, split=\"train\")\r\nissues_dataset\r\n```\r\n\r\nOutput:\r\n```python\r\nIn [25]: ds = load_dataset(\"lewtun/github-issues\")\r\nDownloading: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 10.5k/10.5k [00:00<00:00, 5.75MB/s]\r\nUsing custom data configuration lewtun--github-issues-cff5093ecc410ea2\r\nDownloading and preparing dataset json/lewtun--github-issues to .../.cache/huggingface/datasets/lewtun___json/lewtun--github-issues-cff5093ecc410ea2/0.0.0/e6070c77f18f01a5ad4551a8b7edfba20b8438b7cad4d94e6ad9378022ce4aab...\r\nDownloading data: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 12.2M/12.2M [00:00<00:00, 26.5MB/s]\r\nDownloading data files: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:02<00:00, 2.70s/it]\r\nExtracting data files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 1589.96it/s]\r\nDataset json downloaded and prepared to .../.cache/huggingface/datasets/lewtun___json/lewtun--github-issues-cff5093ecc410ea2/0.0.0/e6070c77f18f01a5ad4551a8b7edfba20b8438b7cad4d94e6ad9378022ce4aab. Subsequent calls will reuse this data.\r\n100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 133.95it/s]\r\n\r\nIn [26]: ds\r\nOut[26]: \r\nDatasetDict({\r\n train: Dataset({\r\n features: ['url', 'repository_url', 'labels_url', 'comments_url', 'events_url', 'html_url', 'id', 'node_id', 'number', 'title', 'user', 'labels', 'state', 'locked', 'assignee', 'assignees', 'milestone', 'comments', 'created_at', 'updated_at', 'closed_at', 'author_association', 'active_lock_reason', 'pull_request', 'body', 'timeline_url', 'performed_via_github_app', 'is_pull_request'],\r\n num_rows: 3019\r\n })\r\n})\r\n```", "Thanks for reporting @km5ar and thank you @albertvillanova for the quick solution! I'll post a fix on the source too" ]
2022-10-06T19:48:58Z
2022-10-07T15:12:01Z
2022-10-07T15:12:01Z
NONE
null
null
null
## Describe the bug I was following chap 5 from huggingface course: https://huggingface.co/course/chapter5/6?fw=tf However, I'm not able to download the datasets, with a 404 erros <img width="1160" alt="iShot2022-10-06_15 54 50" src="https://user-images.githubusercontent.com/54015474/194406327-ae62c2f3-1da5-4686-8631-13d879a0edee.png"> ## Steps to reproduce the bug ```python from huggingface_hub import hf_hub_url data_files = hf_hub_url( repo_id="lewtun/github-issues", filename="datasets-issues-with-hf-doc-builder.jsonl", repo_type="dataset", ) from datasets import load_dataset issues_dataset = load_dataset("json", data_files=data_files, split="train") issues_dataset ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.5.2 - Platform: macOS-10.16-x86_64-i386-64bit - Python version: 3.9.12 - PyArrow version: 9.0.0 - Pandas version: 1.4.4
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5086/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5086/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5085
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5085/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5085/comments
https://api.github.com/repos/huggingface/datasets/issues/5085/events
https://github.com/huggingface/datasets/issues/5085
1,400,113,569
I_kwDODunzps5TdAmh
5,085
Filtering on an empty dataset returns a corrupted dataset.
{ "avatar_url": "https://avatars.githubusercontent.com/u/36087158?v=4", "events_url": "https://api.github.com/users/gabegma/events{/privacy}", "followers_url": "https://api.github.com/users/gabegma/followers", "following_url": "https://api.github.com/users/gabegma/following{/other_user}", "gists_url": "https://api.github.com/users/gabegma/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gabegma", "id": 36087158, "login": "gabegma", "node_id": "MDQ6VXNlcjM2MDg3MTU4", "organizations_url": "https://api.github.com/users/gabegma/orgs", "received_events_url": "https://api.github.com/users/gabegma/received_events", "repos_url": "https://api.github.com/users/gabegma/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gabegma/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gabegma/subscriptions", "type": "User", "url": "https://api.github.com/users/gabegma" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" }, { "color": "DF8D62", "default": false, "description": "", "id": 4614514401, "name": "hacktoberfest", "node_id": "LA_kwDODunzps8AAAABEwvm4Q", "url": "https://api.github.com/repos/huggingface/datasets/labels/hacktoberfest" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/23029765?v=4", "events_url": "https://api.github.com/users/Mouhanedg56/events{/privacy}", "followers_url": "https://api.github.com/users/Mouhanedg56/followers", "following_url": "https://api.github.com/users/Mouhanedg56/following{/other_user}", "gists_url": "https://api.github.com/users/Mouhanedg56/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Mouhanedg56", "id": 23029765, "login": "Mouhanedg56", "node_id": "MDQ6VXNlcjIzMDI5NzY1", "organizations_url": "https://api.github.com/users/Mouhanedg56/orgs", "received_events_url": "https://api.github.com/users/Mouhanedg56/received_events", "repos_url": "https://api.github.com/users/Mouhanedg56/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Mouhanedg56/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Mouhanedg56/subscriptions", "type": "User", "url": "https://api.github.com/users/Mouhanedg56" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/23029765?v=4", "events_url": "https://api.github.com/users/Mouhanedg56/events{/privacy}", "followers_url": "https://api.github.com/users/Mouhanedg56/followers", "following_url": "https://api.github.com/users/Mouhanedg56/following{/other_user}", "gists_url": "https://api.github.com/users/Mouhanedg56/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Mouhanedg56", "id": 23029765, "login": "Mouhanedg56", "node_id": "MDQ6VXNlcjIzMDI5NzY1", "organizations_url": "https://api.github.com/users/Mouhanedg56/orgs", "received_events_url": "https://api.github.com/users/Mouhanedg56/received_events", "repos_url": "https://api.github.com/users/Mouhanedg56/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Mouhanedg56/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Mouhanedg56/subscriptions", "type": "User", "url": "https://api.github.com/users/Mouhanedg56" } ]
null
[ "~~It seems like #5043 fix (merged recently) is the root cause of such behaviour. When we empty indices mapping (because the dataset length equals to zero), we can no longer get column item like: `ds_filter_2['sentence']` which uses\r\n`ds_filter_1._indices.column(0)`~~\r\n\r\n**UPDATE:**\r\nEmpty datasets are returned without going through partial function on `map` method, which will not work to get indices for `filter`: we need to run `get_indices_from_mask_function` partial function on the dataset to get output = `{\"indices\": []}`. But this is complicated since functions used in args, in particular `get_indices_from_mask_function`, do not support empty datasets.\r\nWe can just handle empty datasets aside on filter method.", "#self-assign", "Thank you for solving this amazingly quickly!" ]
2022-10-06T18:18:49Z
2022-10-07T19:06:02Z
2022-10-07T18:40:26Z
NONE
null
null
null
## Describe the bug When filtering a dataset twice, where the first result is an empty dataset, the second dataset seems corrupted. ## Steps to reproduce the bug ```python datasets = load_dataset("glue", "sst2") dataset_split = datasets['validation'] ds_filter_1 = dataset_split.filter(lambda x: False) # Some filtering condition that leads to an empty dataset assert ds_filter_1.num_rows == 0 sentences = ds_filter_1['sentence'] assert len(sentences) == 0 ds_filter_2 = ds_filter_1.filter(lambda x: False) # Some other filtering condition assert ds_filter_2.num_rows == 0 assert 'sentence' in ds_filter_2.column_names sentences = ds_filter_2['sentence'] ``` ## Expected results The last line should be returning an empty list, same as 4 lines above. ## Actual results The last line currently raises `IndexError: index out of bounds`. ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.5.2 - Platform: macOS-11.6.6-x86_64-i386-64bit - Python version: 3.9.11 - PyArrow version: 7.0.0 - Pandas version: 1.4.1
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 3, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 3, "url": "https://api.github.com/repos/huggingface/datasets/issues/5085/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5085/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5084
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5084/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5084/comments
https://api.github.com/repos/huggingface/datasets/issues/5084/events
https://github.com/huggingface/datasets/pull/5084
1,400,016,229
PR_kwDODunzps5AVXwm
5,084
IterableDataset formatting in numpy/torch/tf/jax
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
open
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_5084). All of your documentation changes will be reflected on that endpoint." ]
2022-10-06T16:53:38Z
2022-10-10T13:21:52Z
null
MEMBER
null
true
{ "diff_url": "https://github.com/huggingface/datasets/pull/5084.diff", "html_url": "https://github.com/huggingface/datasets/pull/5084", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/5084.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5084" }
This code now returns a numpy array: ```python from datasets import load_dataset ds = load_dataset("imagenet-1k", split="train", streaming=True).with_format("np") print(next(iter(ds))["image"]) ``` It also works with "arrow", "pandas", "torch", "tf" and "jax" ### Implementation details: I'm using the existing code to format an Arrow Table to the right output format for simplicity. Therefore it's probbaly not the most optimized approach. For example to output PyTorch tensors it does this for every example: python data -> arrow table -> numpy extracted data -> pytorch formatted data ### Releasing this feature Even though I consider this as a bug/inconsistency, this change is a breaking change. And I'm sure some users were relying on the torch iterable dataset to return PIL Image and used data collators to convert to pytorch. So I guess this is `datasets` 3.0 ? ### TODO - [x] merge https://github.com/huggingface/datasets/pull/5072 - [ ] docs - [ ] tests Close https://github.com/huggingface/datasets/issues/5083
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5084/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5084/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5083
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5083/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5083/comments
https://api.github.com/repos/huggingface/datasets/issues/5083/events
https://github.com/huggingface/datasets/issues/5083
1,399,842,514
I_kwDODunzps5Tb-bS
5,083
Support numpy/torch/tf/jax formatting for IterableDataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" }, { "color": "fef2c0", "default": false, "description": "", "id": 3287858981, "name": "streaming", "node_id": "MDU6TGFiZWwzMjg3ODU4OTgx", "url": "https://api.github.com/repos/huggingface/datasets/labels/streaming" } ]
open
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" } ]
null
[]
2022-10-06T15:14:58Z
2022-10-06T15:42:27Z
null
MEMBER
null
null
null
Right now `IterableDataset` doesn't do any formatting. Only the "torch" format can be used to make the dataset inherit from `torch.data.IterableDataset` and make it work with a torch DataLoader. In particular this code should return a numpy array: ```python from datasets import load_dataset ds = load_dataset("imagenet-1k", split="train", streaming=True).with_format("np") print(next(iter(ds))["image"]) ``` Right now it returns a PIL.Image. Setting `streaming=False` does return a numpy array after #5072
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5083/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5083/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5082
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5082/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5082/comments
https://api.github.com/repos/huggingface/datasets/issues/5082/events
https://github.com/huggingface/datasets/pull/5082
1,399,379,777
PR_kwDODunzps5ATJv-
5,082
adding keep in memory
{ "avatar_url": "https://avatars.githubusercontent.com/u/66799406?v=4", "events_url": "https://api.github.com/users/Mustapha-AJEGHRIR/events{/privacy}", "followers_url": "https://api.github.com/users/Mustapha-AJEGHRIR/followers", "following_url": "https://api.github.com/users/Mustapha-AJEGHRIR/following{/other_user}", "gists_url": "https://api.github.com/users/Mustapha-AJEGHRIR/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Mustapha-AJEGHRIR", "id": 66799406, "login": "Mustapha-AJEGHRIR", "node_id": "MDQ6VXNlcjY2Nzk5NDA2", "organizations_url": "https://api.github.com/users/Mustapha-AJEGHRIR/orgs", "received_events_url": "https://api.github.com/users/Mustapha-AJEGHRIR/received_events", "repos_url": "https://api.github.com/users/Mustapha-AJEGHRIR/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Mustapha-AJEGHRIR/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Mustapha-AJEGHRIR/subscriptions", "type": "User", "url": "https://api.github.com/users/Mustapha-AJEGHRIR" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "Hi @mariosasko , I have added a test for the `keep_in_memory` version. I have also removed the `Compatible with temp_seed` part in the scope of `dset_shuffled`, please verify if that makes sense." ]
2022-10-06T11:10:46Z
2022-10-07T14:35:34Z
2022-10-07T14:32:54Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5082.diff", "html_url": "https://github.com/huggingface/datasets/pull/5082", "merged_at": "2022-10-07T14:32:54Z", "patch_url": "https://github.com/huggingface/datasets/pull/5082.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5082" }
Fixing #514 . Hello @mariosasko 👋, I have implemented what you have recommanded to fix the keep in memory problem for shuffle on the issue #514 .
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5082/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5082/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5081
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5081/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5081/comments
https://api.github.com/repos/huggingface/datasets/issues/5081/events
https://github.com/huggingface/datasets/issues/5081
1,399,340,050
I_kwDODunzps5TaDwS
5,081
Bug loading `sentence-transformers/parallel-sentences`
{ "avatar_url": "https://avatars.githubusercontent.com/u/229382?v=4", "events_url": "https://api.github.com/users/PhilipMay/events{/privacy}", "followers_url": "https://api.github.com/users/PhilipMay/followers", "following_url": "https://api.github.com/users/PhilipMay/following{/other_user}", "gists_url": "https://api.github.com/users/PhilipMay/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/PhilipMay", "id": 229382, "login": "PhilipMay", "node_id": "MDQ6VXNlcjIyOTM4Mg==", "organizations_url": "https://api.github.com/users/PhilipMay/orgs", "received_events_url": "https://api.github.com/users/PhilipMay/received_events", "repos_url": "https://api.github.com/users/PhilipMay/repos", "site_admin": false, "starred_url": "https://api.github.com/users/PhilipMay/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/PhilipMay/subscriptions", "type": "User", "url": "https://api.github.com/users/PhilipMay" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
null
[ "tagging @nreimers ", "The dataset is sadly not really compatible to be loaded with `load_dataset`. So far it is better to git clone it and to use the files directly.\r\n\r\nA data loading script would be needed to be added to this dataset. But this was too much overhead / not really intuitive how to create it.", "Since the dataset is a bunch of TSVs we should not need a dataset script I think.\r\n\r\nBy default it tries to load all the TSVs at once, which fails here because they don't all have the same columns (pd.read_csv uses the first line as header by default). But those files have no header ! So, to properly load any TSV file in this repo, one has to pass `names=[...]` for pd.read_csv to know which column names to use.\r\n\r\nTo fix this situation, we can either do\r\n1. replace the TSVs by TSV with column names\r\n2. OR specify the pd.read_csv kwargs as YAML in the dataset card - and `datasets` would use that by default\r\n\r\nWDTY ?", "There are more issues in the dataset.\r\nTo load OpenSubtitles I have to provide this (see `skiprows`):\r\n\r\n```python\r\ndf_os = pd.read_csv(\r\n \"./parallel-sentences/OpenSubtitles/OpenSubtitles-en-de-train.tsv.gz\", \r\n sep=\"\\t\", \r\n quoting=csv.QUOTE_NONE,\r\n header=None,\r\n names=[\"en\", \"de\"],\r\n skiprows=[540344, 9151700, 10040173, 10040199, 11314673, 11338258, 11869223, 12159297, 12251078, 12303334],\r\n)\r\n```", "What's wrong with those lines exactly ?\r\nMaybe passing `error_bad_lines=False` (and maybe `warn_bad_lines=True`) can be helpful", "> What's wrong with those lines exactly ? \r\n\r\nStuff like this: `ParserError: Error tokenizing data. C error: Expected 2 fields in line 540345, saw 3`\r\n\r\n", "> Maybe passing error_bad_lines=False (and maybe warn_bad_lines=True) can be helpful\r\n\r\nYes. That would hide the issue but not solve it.", "@nreimers WDYT about the two options mentioned above ?" ]
2022-10-06T10:47:51Z
2022-10-11T10:00:48Z
null
CONTRIBUTOR
null
null
null
## Steps to reproduce the bug ```python from datasets import load_dataset dataset = load_dataset("sentence-transformers/parallel-sentences") ``` raises this: ``` /home/phmay/miniconda3/envs/paraphrase-mining/lib/python3.9/site-packages/datasets/download/streaming_download_manager.py:697: FutureWarning: the 'mangle_dupe_cols' keyword is deprecated and will be removed in a future version. Please take steps to stop the use of 'mangle_dupe_cols' return pd.read_csv(xopen(filepath_or_buffer, "rb", use_auth_token=use_auth_token), **kwargs) /home/phmay/miniconda3/envs/paraphrase-mining/lib/python3.9/site-packages/datasets/download/streaming_download_manager.py:697: FutureWarning: the 'mangle_dupe_cols' keyword is deprecated and will be removed in a future version. Please take steps to stop the use of 'mangle_dupe_cols' return pd.read_csv(xopen(filepath_or_buffer, "rb", use_auth_token=use_auth_token), **kwargs) --------------------------------------------------------------------------- ValueError Traceback (most recent call last) Cell In [4], line 1 ----> 1 dataset = load_dataset("sentence-transformers/parallel-sentences", split="train") File ~/miniconda3/envs/paraphrase-mining/lib/python3.9/site-packages/datasets/load.py:1693, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, **config_kwargs) 1690 try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES 1692 # Download and prepare data -> 1693 builder_instance.download_and_prepare( 1694 download_config=download_config, 1695 download_mode=download_mode, 1696 ignore_verifications=ignore_verifications, 1697 try_from_hf_gcs=try_from_hf_gcs, 1698 use_auth_token=use_auth_token, 1699 ) 1701 # Build dataset for splits 1702 keep_in_memory = ( 1703 keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size) 1704 ) File ~/miniconda3/envs/paraphrase-mining/lib/python3.9/site-packages/datasets/builder.py:807, in DatasetBuilder.download_and_prepare(self, output_dir, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, storage_options, **download_and_prepare_kwargs) 801 if not downloaded_from_gcs: 802 prepare_split_kwargs = { 803 "file_format": file_format, 804 "max_shard_size": max_shard_size, 805 **download_and_prepare_kwargs, 806 } --> 807 self._download_and_prepare( 808 dl_manager=dl_manager, 809 verify_infos=verify_infos, 810 **prepare_split_kwargs, 811 **download_and_prepare_kwargs, 812 ) 813 # Sync info 814 self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values()) File ~/miniconda3/envs/paraphrase-mining/lib/python3.9/site-packages/datasets/builder.py:898, in DatasetBuilder._download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs) 894 split_dict.add(split_generator.split_info) 896 try: 897 # Prepare split will record examples associated to the split --> 898 self._prepare_split(split_generator, **prepare_split_kwargs) 899 except OSError as e: 900 raise OSError( 901 "Cannot find data file. " 902 + (self.manual_download_instructions or "") 903 + "\nOriginal error:\n" 904 + str(e) 905 ) from None File ~/miniconda3/envs/paraphrase-mining/lib/python3.9/site-packages/datasets/builder.py:1513, in ArrowBasedBuilder._prepare_split(self, split_generator, file_format, max_shard_size) 1506 shard_id += 1 1507 writer = writer_class( 1508 features=writer._features, 1509 path=fpath.replace("SSSSS", f"{shard_id:05d}"), 1510 storage_options=self._fs.storage_options, 1511 embed_local_files=embed_local_files, 1512 ) -> 1513 writer.write_table(table) 1514 finally: 1515 num_shards = shard_id + 1 File ~/miniconda3/envs/paraphrase-mining/lib/python3.9/site-packages/datasets/arrow_writer.py:540, in ArrowWriter.write_table(self, pa_table, writer_batch_size) 538 if self.pa_writer is None: 539 self._build_writer(inferred_schema=pa_table.schema) --> 540 pa_table = table_cast(pa_table, self._schema) 541 if self.embed_local_files: 542 pa_table = embed_table_storage(pa_table) File ~/miniconda3/envs/paraphrase-mining/lib/python3.9/site-packages/datasets/table.py:2044, in table_cast(table, schema) 2032 """Improved version of pa.Table.cast. 2033 2034 It supports casting to feature types stored in the schema metadata. (...) 2041 table (:obj:`pyarrow.Table`): the casted table 2042 """ 2043 if table.schema != schema: -> 2044 return cast_table_to_schema(table, schema) 2045 elif table.schema.metadata != schema.metadata: 2046 return table.replace_schema_metadata(schema.metadata) File ~/miniconda3/envs/paraphrase-mining/lib/python3.9/site-packages/datasets/table.py:2005, in cast_table_to_schema(table, schema) 2003 features = Features.from_arrow_schema(schema) 2004 if sorted(table.column_names) != sorted(features): -> 2005 raise ValueError(f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match") 2006 arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()] 2007 return pa.Table.from_arrays(arrays, schema=schema) ValueError: Couldn't cast Action taken on Parliament's resolutions: see Minutes: string Následný postup na základě usnesení Parlamentu: viz zápis: string -- schema metadata -- pandas: '{"index_columns": [{"kind": "range", "name": null, "start": 0, "' + 742 to {'Membership of Parliament: see Minutes': Value(dtype='string', id=None), 'Състав на Парламента: вж. протоколи': Value(dtype='string', id=None)} because column names don't match ``` ## Expected results no error ## Actual results error ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: - Platform: Linux - Python version: Python 3.9.13 - PyArrow version: pyarrow 9.0.0 - transformers 4.22.2 - datasets 2.5.2
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5081/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5081/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5080
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5080/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5080/comments
https://api.github.com/repos/huggingface/datasets/issues/5080/events
https://github.com/huggingface/datasets/issues/5080
1,398,849,565
I_kwDODunzps5TYMAd
5,080
Use hfh for caching
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
[ "There is some discussion in https://github.com/huggingface/huggingface_hub/pull/1088 if it can help :)" ]
2022-10-06T05:51:58Z
2022-10-06T14:26:05Z
null
MEMBER
null
null
null
## Is your feature request related to a problem? As previously discussed in our meeting with @Wauplin and agreed on our last datasets team sync meeting, I'm investigating how `datasets` can use `hfh` for caching. ## Describe the solution you'd like Due to the peculiarities of the `datasets` cache, I would propose adopting `hfh` caching system in stages. First, we could easily start using `hfh` caching for: - dataset Python scripts - dataset READMEs - dataset infos JSON files (now deprecated) Second, we could also use `hfh` caching for data files downloaded from the Hub. Further investigation is needed for: - files downloaded from non-Hub hosts - extracted files from downloaded archive/compressed files - generated Arrow files ## Additional context Docs about the `hfh` caching system: - [Manage huggingface_hub cache-system](https://huggingface.co/docs/huggingface_hub/main/en/how-to-cache) - [Cache-system reference](https://huggingface.co/docs/huggingface_hub/main/en/package_reference/cache) The `transformers` library has already adopted `hfh` for caching. See: - huggingface/transformers#18438 - huggingface/transformers#18857 - huggingface/transformers#18966
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5080/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5080/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5079
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5079/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5079/comments
https://api.github.com/repos/huggingface/datasets/issues/5079/events
https://github.com/huggingface/datasets/pull/5079
1,398,609,305
PR_kwDODunzps5AQemi
5,079
refactor: replace AssertionError with more meaningful exceptions (#5074)
{ "avatar_url": "https://avatars.githubusercontent.com/u/20004072?v=4", "events_url": "https://api.github.com/users/galbwe/events{/privacy}", "followers_url": "https://api.github.com/users/galbwe/followers", "following_url": "https://api.github.com/users/galbwe/following{/other_user}", "gists_url": "https://api.github.com/users/galbwe/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/galbwe", "id": 20004072, "login": "galbwe", "node_id": "MDQ6VXNlcjIwMDA0MDcy", "organizations_url": "https://api.github.com/users/galbwe/orgs", "received_events_url": "https://api.github.com/users/galbwe/received_events", "repos_url": "https://api.github.com/users/galbwe/repos", "site_admin": false, "starred_url": "https://api.github.com/users/galbwe/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/galbwe/subscriptions", "type": "User", "url": "https://api.github.com/users/galbwe" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-06T01:39:35Z
2022-10-07T14:35:43Z
2022-10-07T14:33:10Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5079.diff", "html_url": "https://github.com/huggingface/datasets/pull/5079", "merged_at": "2022-10-07T14:33:10Z", "patch_url": "https://github.com/huggingface/datasets/pull/5079.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5079" }
Closes #5074 Replaces `AssertionError` in the following files with more descriptive exceptions: - `src/datasets/arrow_reader.py` - `src/datasets/builder.py` - `src/datasets/utils/version.py` The issue listed more files that needed to be fixed, but the rest of them were contained in the top-level `datasets` directory, which was removed when #4974 was merged
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5079/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5079/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5078
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5078/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5078/comments
https://api.github.com/repos/huggingface/datasets/issues/5078/events
https://github.com/huggingface/datasets/pull/5078
1,398,335,148
PR_kwDODunzps5APjkH
5,078
Fix header level in Audio docs
{ "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/stevhliu", "id": 59462357, "login": "stevhliu", "node_id": "MDQ6VXNlcjU5NDYyMzU3", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "repos_url": "https://api.github.com/users/stevhliu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "type": "User", "url": "https://api.github.com/users/stevhliu" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-05T20:22:44Z
2022-10-06T08:12:23Z
2022-10-06T08:09:41Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5078.diff", "html_url": "https://github.com/huggingface/datasets/pull/5078", "merged_at": "2022-10-06T08:09:41Z", "patch_url": "https://github.com/huggingface/datasets/pull/5078.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5078" }
Fixes header level so `Dataset features` is the doc title instead of `The Audio type`: ![Screen Shot 2022-10-05 at 1 22 02 PM](https://user-images.githubusercontent.com/59462357/194155840-eeb5d62f-f4eb-411e-b281-8494c5fffdce.png)
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5078/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5078/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5077
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5077/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5077/comments
https://api.github.com/repos/huggingface/datasets/issues/5077/events
https://github.com/huggingface/datasets/pull/5077
1,398,080,859
PR_kwDODunzps5AOs9L
5,077
Fix passed download_config in HubDatasetModuleFactoryWithoutScript
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-05T16:42:36Z
2022-10-06T05:31:22Z
2022-10-06T05:29:06Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5077.diff", "html_url": "https://github.com/huggingface/datasets/pull/5077", "merged_at": "2022-10-06T05:29:06Z", "patch_url": "https://github.com/huggingface/datasets/pull/5077.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5077" }
Fix passed `download_config` in `HubDatasetModuleFactoryWithoutScript`.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5077/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5077/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5076
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5076/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5076/comments
https://api.github.com/repos/huggingface/datasets/issues/5076/events
https://github.com/huggingface/datasets/pull/5076
1,397,918,092
PR_kwDODunzps5AOJp7
5,076
fix: update exception throw from OSError to EnvironmentError in `push…
{ "avatar_url": "https://avatars.githubusercontent.com/u/29496999?v=4", "events_url": "https://api.github.com/users/rahulXs/events{/privacy}", "followers_url": "https://api.github.com/users/rahulXs/followers", "following_url": "https://api.github.com/users/rahulXs/following{/other_user}", "gists_url": "https://api.github.com/users/rahulXs/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rahulXs", "id": 29496999, "login": "rahulXs", "node_id": "MDQ6VXNlcjI5NDk2OTk5", "organizations_url": "https://api.github.com/users/rahulXs/orgs", "received_events_url": "https://api.github.com/users/rahulXs/received_events", "repos_url": "https://api.github.com/users/rahulXs/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rahulXs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rahulXs/subscriptions", "type": "User", "url": "https://api.github.com/users/rahulXs" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-05T14:46:29Z
2022-10-07T14:35:57Z
2022-10-07T14:33:27Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5076.diff", "html_url": "https://github.com/huggingface/datasets/pull/5076", "merged_at": "2022-10-07T14:33:27Z", "patch_url": "https://github.com/huggingface/datasets/pull/5076.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5076" }
Status: Ready for review Description of Changes: Fixes #5075 Changes proposed in this pull request: - Throw EnvironmentError instead of OSError in `push_to_hub` when the Hub token is not present.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5076/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5076/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5075
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5075/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5075/comments
https://api.github.com/repos/huggingface/datasets/issues/5075/events
https://github.com/huggingface/datasets/issues/5075
1,397,865,501
I_kwDODunzps5TUbwd
5,075
Throw EnvironmentError when token is not present
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[ { "color": "7057ff", "default": true, "description": "Good for newcomers", "id": 1935892877, "name": "good first issue", "node_id": "MDU6TGFiZWwxOTM1ODkyODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue" }, { "color": "DF8D62", "default": false, "description": "", "id": 4614514401, "name": "hacktoberfest", "node_id": "LA_kwDODunzps8AAAABEwvm4Q", "url": "https://api.github.com/repos/huggingface/datasets/labels/hacktoberfest" } ]
closed
false
null
[]
null
[ "@mariosasko I've raised a PR #5076 against this issue. Please help to review. Thanks." ]
2022-10-05T14:14:18Z
2022-10-07T14:33:28Z
2022-10-07T14:33:28Z
CONTRIBUTOR
null
null
null
Throw EnvironmentError instead of OSError ([link](https://github.com/huggingface/datasets/blob/6ad430ba0cdeeb601170f732d4bd977f5c04594d/src/datasets/arrow_dataset.py#L4306) to the line) in `push_to_hub` when the Hub token is not present.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5075/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5075/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5074
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5074/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5074/comments
https://api.github.com/repos/huggingface/datasets/issues/5074/events
https://github.com/huggingface/datasets/issues/5074
1,397,850,352
I_kwDODunzps5TUYDw
5,074
Replace AssertionErrors with more meaningful errors
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[ { "color": "7057ff", "default": true, "description": "Good for newcomers", "id": 1935892877, "name": "good first issue", "node_id": "MDU6TGFiZWwxOTM1ODkyODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue" }, { "color": "DF8D62", "default": false, "description": "", "id": 4614514401, "name": "hacktoberfest", "node_id": "LA_kwDODunzps8AAAABEwvm4Q", "url": "https://api.github.com/repos/huggingface/datasets/labels/hacktoberfest" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/20004072?v=4", "events_url": "https://api.github.com/users/galbwe/events{/privacy}", "followers_url": "https://api.github.com/users/galbwe/followers", "following_url": "https://api.github.com/users/galbwe/following{/other_user}", "gists_url": "https://api.github.com/users/galbwe/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/galbwe", "id": 20004072, "login": "galbwe", "node_id": "MDQ6VXNlcjIwMDA0MDcy", "organizations_url": "https://api.github.com/users/galbwe/orgs", "received_events_url": "https://api.github.com/users/galbwe/received_events", "repos_url": "https://api.github.com/users/galbwe/repos", "site_admin": false, "starred_url": "https://api.github.com/users/galbwe/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/galbwe/subscriptions", "type": "User", "url": "https://api.github.com/users/galbwe" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/20004072?v=4", "events_url": "https://api.github.com/users/galbwe/events{/privacy}", "followers_url": "https://api.github.com/users/galbwe/followers", "following_url": "https://api.github.com/users/galbwe/following{/other_user}", "gists_url": "https://api.github.com/users/galbwe/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/galbwe", "id": 20004072, "login": "galbwe", "node_id": "MDQ6VXNlcjIwMDA0MDcy", "organizations_url": "https://api.github.com/users/galbwe/orgs", "received_events_url": "https://api.github.com/users/galbwe/received_events", "repos_url": "https://api.github.com/users/galbwe/repos", "site_admin": false, "starred_url": "https://api.github.com/users/galbwe/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/galbwe/subscriptions", "type": "User", "url": "https://api.github.com/users/galbwe" } ]
null
[ "Hi, can I pick up this issue?", "#self-assign", "Looks like the top-level `datasource` directory was removed when https://github.com/huggingface/datasets/pull/4974 was merged, so there are 3 source files to fix." ]
2022-10-05T14:03:55Z
2022-10-07T14:33:11Z
2022-10-07T14:33:11Z
CONTRIBUTOR
null
null
null
Replace the AssertionErrors with more meaningful errors such as ValueError, TypeError, etc. The files with AssertionErrors that need to be replaced: ``` src/datasets/arrow_reader.py src/datasets/builder.py src/datasets/utils/version.py ```
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5074/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5074/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5073
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5073/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5073/comments
https://api.github.com/repos/huggingface/datasets/issues/5073/events
https://github.com/huggingface/datasets/pull/5073
1,397,832,183
PR_kwDODunzps5AN3Gn
5,073
Restore saved format state in `load_from_disk`
{ "avatar_url": "https://avatars.githubusercontent.com/u/74454835?v=4", "events_url": "https://api.github.com/users/asofiaoliveira/events{/privacy}", "followers_url": "https://api.github.com/users/asofiaoliveira/followers", "following_url": "https://api.github.com/users/asofiaoliveira/following{/other_user}", "gists_url": "https://api.github.com/users/asofiaoliveira/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/asofiaoliveira", "id": 74454835, "login": "asofiaoliveira", "node_id": "MDQ6VXNlcjc0NDU0ODM1", "organizations_url": "https://api.github.com/users/asofiaoliveira/orgs", "received_events_url": "https://api.github.com/users/asofiaoliveira/received_events", "repos_url": "https://api.github.com/users/asofiaoliveira/repos", "site_admin": false, "starred_url": "https://api.github.com/users/asofiaoliveira/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/asofiaoliveira/subscriptions", "type": "User", "url": "https://api.github.com/users/asofiaoliveira" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-05T13:51:47Z
2022-10-11T16:55:07Z
2022-10-11T16:49:23Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5073.diff", "html_url": "https://github.com/huggingface/datasets/pull/5073", "merged_at": "2022-10-11T16:49:23Z", "patch_url": "https://github.com/huggingface/datasets/pull/5073.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5073" }
Hello! @mariosasko This pull request relates to issue #5050 and intends to add the format to datasets loaded from disk. All I did was add a set_format in the Dataset.load_from_disk, as DatasetDict.load_from_disk relies on the first. I don't know if I should add a test and where, so let me know if I should and I can work on that as well!
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5073/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5073/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5072
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5072/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5072/comments
https://api.github.com/repos/huggingface/datasets/issues/5072/events
https://github.com/huggingface/datasets/pull/5072
1,397,765,531
PR_kwDODunzps5ANoo5
5,072
Image & Audio formatting for numpy/torch/tf/jax
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "I just added a consolidation step so that numpy arrays or tensors of images are stacked together if the shapes match, instead of having lists of tensors\r\n\r\nFeel free to review @mariosasko :)", "I added a few lines in the docs and reverted the ragged numpy array change :)\r\n\r\nready for another review @mariosasko !" ]
2022-10-05T13:07:03Z
2022-10-10T13:24:10Z
2022-10-10T13:21:32Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5072.diff", "html_url": "https://github.com/huggingface/datasets/pull/5072", "merged_at": "2022-10-10T13:21:32Z", "patch_url": "https://github.com/huggingface/datasets/pull/5072.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5072" }
Added support for image and audio formatting for numpy, torch, tf and jax. For images, the dtype used is the one of the image (the one returned by PIL.Image), e.g. uint8 I also added support for string, binary and None types. In particular for torch and jax, strings are kept unchanged (previously it was returning an error because you can't create a tensor of strings)
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5072/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5072/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5071
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5071/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5071/comments
https://api.github.com/repos/huggingface/datasets/issues/5071/events
https://github.com/huggingface/datasets/pull/5071
1,397,301,270
PR_kwDODunzps5AMG3g
5,071
Support DEFAULT_CONFIG_NAME when no BUILDER_CONFIGS
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "Super, thanks a lot for adding this support, Albert!" ]
2022-10-05T06:28:39Z
2022-10-06T14:43:12Z
2022-10-06T14:40:26Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5071.diff", "html_url": "https://github.com/huggingface/datasets/pull/5071", "merged_at": "2022-10-06T14:40:25Z", "patch_url": "https://github.com/huggingface/datasets/pull/5071.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5071" }
This PR supports defining a default config name, even if no predefined allowed config names are set. Fix #5070. CC: @stas00
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/5071/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5071/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5070
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5070/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5070/comments
https://api.github.com/repos/huggingface/datasets/issues/5070/events
https://github.com/huggingface/datasets/issues/5070
1,396,765,647
I_kwDODunzps5TQPPP
5,070
Support default config name when no builder configs
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
[ "Thank you for creating this feature request, Albert.\r\n\r\nFor context this is the datatest where Albert has been helping me to switch to on-the-fly split config https://huggingface.co/datasets/HuggingFaceM4/cm4-synthetic-testing\r\n\r\nand the attempt to switch on-the-fly splits was here: https://huggingface.co/datasets/HuggingFaceM4/cm4-synthetic-testing/discussions/2/files\r\n\r\nbut which I had to revert since providing no split breaks at run time.\r\n" ]
2022-10-04T19:49:35Z
2022-10-06T14:40:26Z
2022-10-06T14:40:26Z
MEMBER
null
null
null
**Is your feature request related to a problem? Please describe.** As discussed with @stas00, we could support defining a default config name, even if no predefined allowed config names are set. That is, support `DEFAULT_CONFIG_NAME`, even when `BUILDER_CONFIGS` is not defined. **Additional context** In order to support creating configs on the fly **by name** (not using kwargs), the list of allowed builder configs `BUILDER_CONFIGS` must not be set. However, if so, then `DEFAULT_CONFIG_NAME` is not supported.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/5070/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5070/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5067
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5067/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5067/comments
https://api.github.com/repos/huggingface/datasets/issues/5067/events
https://github.com/huggingface/datasets/pull/5067
1,396,361,768
PR_kwDODunzps5AI86d
5,067
Fix CONTRIBUTING once dataset scripts transferred to Hub
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-04T14:16:05Z
2022-10-06T06:14:43Z
2022-10-06T06:12:12Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5067.diff", "html_url": "https://github.com/huggingface/datasets/pull/5067", "merged_at": "2022-10-06T06:12:12Z", "patch_url": "https://github.com/huggingface/datasets/pull/5067.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5067" }
This PR updates the `CONTRIBUTING.md` guide, once the all dataset scripts have been removed from the GitHub repo and transferred to the HF Hub: - #4974 See diff here: https://github.com/huggingface/datasets/commit/e3291ecff9e54f09fcee3f313f051a03fdc3d94b Additionally, this PR fixes the line separator that by some previous mistake was CRLF instead of LF.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5067/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5067/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5066
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5066/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5066/comments
https://api.github.com/repos/huggingface/datasets/issues/5066/events
https://github.com/huggingface/datasets/pull/5066
1,396,086,745
PR_kwDODunzps5AIDWj
5,066
Support streaming gzip.open
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-04T11:20:05Z
2022-10-06T15:13:51Z
2022-10-06T15:11:29Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5066.diff", "html_url": "https://github.com/huggingface/datasets/pull/5066", "merged_at": "2022-10-06T15:11:29Z", "patch_url": "https://github.com/huggingface/datasets/pull/5066.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5066" }
This PR implements support for streaming out-of-the-box dataset scripts containing `gzip.open`. This has been a recurring issue. See, e.g.: - #5060 - #3191
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5066/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5066/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5065
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5065/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5065/comments
https://api.github.com/repos/huggingface/datasets/issues/5065/events
https://github.com/huggingface/datasets/pull/5065
1,396,003,362
PR_kwDODunzps5AHxlQ
5,065
Ci py3.10
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "Does it sound good to you @albertvillanova ?" ]
2022-10-04T10:13:51Z
2022-11-29T15:28:05Z
2022-11-29T15:25:26Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5065.diff", "html_url": "https://github.com/huggingface/datasets/pull/5065", "merged_at": "2022-11-29T15:25:26Z", "patch_url": "https://github.com/huggingface/datasets/pull/5065.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5065" }
Added a CI job for python 3.10 Some dependencies don't work on 3.10 like apache beam, so I remove them from the extras in this case. I also removed some s3 fixtures that we don't use anymore (and that don't work on 3.10 anyway)
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5065/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5065/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5064
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5064/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5064/comments
https://api.github.com/repos/huggingface/datasets/issues/5064/events
https://github.com/huggingface/datasets/pull/5064
1,395,978,143
PR_kwDODunzps5AHsP0
5,064
Align signature of create/delete_repo with latest hfh
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-04T09:54:53Z
2022-10-07T17:02:11Z
2022-10-07T16:59:30Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5064.diff", "html_url": "https://github.com/huggingface/datasets/pull/5064", "merged_at": "2022-10-07T16:59:30Z", "patch_url": "https://github.com/huggingface/datasets/pull/5064.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5064" }
This PR aligns the signature of `create_repo`/`delete_repo` with the current one in hfh, by removing deprecated `name` and `organization`, and using `repo_id` instead. Related to: - #5063 CC: @lhoestq
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5064/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5064/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5063
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5063/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5063/comments
https://api.github.com/repos/huggingface/datasets/issues/5063/events
https://github.com/huggingface/datasets/pull/5063
1,395,895,463
PR_kwDODunzps5AHasG
5,063
Align signature of list_repo_files with latest hfh
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-04T08:51:46Z
2022-10-07T16:42:57Z
2022-10-07T16:40:16Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5063.diff", "html_url": "https://github.com/huggingface/datasets/pull/5063", "merged_at": "2022-10-07T16:40:16Z", "patch_url": "https://github.com/huggingface/datasets/pull/5063.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5063" }
This PR aligns the signature of `list_repo_files` with the current one in `hfh`, by renaming deprecated `token` to `use_auth_token`. This is already the case for `dataset_info`. CC: @lhoestq
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5063/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5063/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5062
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5062/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5062/comments
https://api.github.com/repos/huggingface/datasets/issues/5062/events
https://github.com/huggingface/datasets/pull/5062
1,395,739,417
PR_kwDODunzps5AG6SA
5,062
Fix CI hfh token warning
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "good catch !" ]
2022-10-04T06:36:54Z
2022-10-04T08:58:15Z
2022-10-04T08:42:31Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5062.diff", "html_url": "https://github.com/huggingface/datasets/pull/5062", "merged_at": "2022-10-04T08:42:31Z", "patch_url": "https://github.com/huggingface/datasets/pull/5062.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5062" }
In our CI, we get warnings from `hfh` about using deprecated `token`: https://github.com/huggingface/datasets/actions/runs/3174626525/jobs/5171672431 ``` tests/test_upstream_hub.py::TestPushToHub::test_push_dataset_dict_to_hub_private tests/test_upstream_hub.py::TestPushToHub::test_push_dataset_dict_to_hub tests/test_upstream_hub.py::TestPushToHub::test_push_dataset_dict_to_hub_multiple_files tests/test_upstream_hub.py::TestPushToHub::test_push_dataset_dict_to_hub_multiple_files_with_max_shard_size tests/test_upstream_hub.py::TestPushToHub::test_push_dataset_dict_to_hub_overwrite_files C:\hostedtoolcache\windows\Python\3.7.9\x64\lib\site-packages\huggingface_hub\utils\_deprecation.py:97: FutureWarning: Deprecated argument(s) used in 'dataset_info': token. Will not be supported from version '0.12'. warnings.warn(message, FutureWarning) ``` This PR fixes the tests in `TestPushToHub` so that we fix these warnings. Continuation of: - #5031 CC: @lhoestq
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5062/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5062/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5061
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5061/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5061/comments
https://api.github.com/repos/huggingface/datasets/issues/5061/events
https://github.com/huggingface/datasets/issues/5061
1,395,476,770
I_kwDODunzps5TLUki
5,061
`_pickle.PicklingError: logger cannot be pickled` in multiprocessing `map`
{ "avatar_url": "https://avatars.githubusercontent.com/u/11954789?v=4", "events_url": "https://api.github.com/users/ZhaofengWu/events{/privacy}", "followers_url": "https://api.github.com/users/ZhaofengWu/followers", "following_url": "https://api.github.com/users/ZhaofengWu/following{/other_user}", "gists_url": "https://api.github.com/users/ZhaofengWu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ZhaofengWu", "id": 11954789, "login": "ZhaofengWu", "node_id": "MDQ6VXNlcjExOTU0Nzg5", "organizations_url": "https://api.github.com/users/ZhaofengWu/orgs", "received_events_url": "https://api.github.com/users/ZhaofengWu/received_events", "repos_url": "https://api.github.com/users/ZhaofengWu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ZhaofengWu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ZhaofengWu/subscriptions", "type": "User", "url": "https://api.github.com/users/ZhaofengWu" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
null
[ "This is maybe related to python 3.10, do you think you could try on 3.8 ?\r\n\r\nIn the meantime we'll keep improving the support for 3.10. Let me add a dedicated CI", "I did some binary search and seems like the root cause is either `multiprocess` or `dill`. python 3.10 is fine. Specifically:\r\n- `multiprocess==0.70.12.2, dill==0.3.4`: works\r\n- `multiprocess==0.70.12.2, dill==0.3.5.1`: doesn't work\r\n- `multiprocess==0.70.13, dill==0.3.5.1`: doesn't work\r\n- `multiprocess==0.70.13, dill==0.3.4`: can't test, `multiprocess==0.70.13` requires `dill>=0.3.5.1`\r\n\r\nI will pin their versions on my end. I don't have enough knowledge of how python multiprocessing works to debug this, but ideally there could be a fix. It's also possible that I'm doing something wrong in my code, but again the `.name` of the logger that failed to pickle is `datasets.fingerprint`, which I'm not using directly.", "Do you know which logger fails at being pickled ?", "I'm not 100% sure how to figure it out -- the stack trace above doesn't clearly give me a place where I can print out who owns the logger, etc. I only found out its `.name` is `datasets.fingerprint` by printing right before\r\n```\r\n File \".../logging/__init__.py\", line 1774, in __reduce__\r\n raise pickle.PicklingError('logger cannot be pickled')\r\n```\r\nIf you have any idea on how to find it out, please let me know.", "Ok I see, not sure why it triggers this error though, in `logging.py` the code is\r\n\r\nhttps://github.com/python/cpython/blob/c9da063e32725a66495e4047b8a5ed13e72d9e8e/Lib/logging/__init__.py#L1769-L1775\r\n\r\nand on my side it works on 3.10 with dill 0.3.5.1 and multiprocess 0.70.13\r\n```python\r\n>>> datasets.fingerprint.logger.__reduce__() \r\n(<function logging.getLogger(name=None)>, ('datasets.fingerprint',))\r\n```\r\nCould you try to run this code ?\r\n\r\nAre you in an environment where the loggers are instantiated differently ? Can you check the source code of `logging.Logger.__reduce__` in `\".../logging/__init__.py\", line 1774` ?" ]
2022-10-03T23:51:38Z
2022-10-14T16:44:54Z
null
NONE
null
null
null
## Describe the bug When I `map` with multiple processes, this error occurs. The `.name` of the `logger` that fails to pickle in the final line is `datasets.fingerprint`. ``` File "~/project/dataset.py", line 204, in <dictcomp> split: dataset.map( File ".../site-packages/datasets/arrow_dataset.py", line 2489, in map transformed_shards[index] = async_result.get() File ".../site-packages/multiprocess/pool.py", line 771, in get raise self._value File ".../site-packages/multiprocess/pool.py", line 537, in _handle_tasks put(task) File ".../site-packages/multiprocess/connection.py", line 214, in send self._send_bytes(_ForkingPickler.dumps(obj)) File ".../site-packages/multiprocess/reduction.py", line 54, in dumps cls(buf, protocol, *args, **kwds).dump(obj) File ".../site-packages/dill/_dill.py", line 620, in dump StockPickler.dump(self, obj) File ".../pickle.py", line 487, in dump self.save(obj) File ".../pickle.py", line 560, in save f(self, obj) # Call unbound method with explicit self File ".../pickle.py", line 902, in save_tuple save(element) File ".../pickle.py", line 560, in save f(self, obj) # Call unbound method with explicit self File ".../site-packages/dill/_dill.py", line 1963, in save_function _save_with_postproc(pickler, (_create_function, ( File ".../site-packages/dill/_dill.py", line 1140, in _save_with_postproc pickler.save_reduce(*reduction, obj=obj) File ".../pickle.py", line 717, in save_reduce save(state) File ".../pickle.py", line 560, in save f(self, obj) # Call unbound method with explicit self File ".../pickle.py", line 887, in save_tuple save(element) File ".../pickle.py", line 560, in save f(self, obj) # Call unbound method with explicit self File ".../site-packages/dill/_dill.py", line 1251, in save_module_dict StockPickler.save_dict(pickler, obj) File ".../pickle.py", line 972, in save_dict self._batch_setitems(obj.items()) File ".../pickle.py", line 998, in _batch_setitems save(v) File ".../pickle.py", line 560, in save f(self, obj) # Call unbound method with explicit self File ".../site-packages/dill/_dill.py", line 1963, in save_function _save_with_postproc(pickler, (_create_function, ( File ".../site-packages/dill/_dill.py", line 1140, in _save_with_postproc pickler.save_reduce(*reduction, obj=obj) File ".../pickle.py", line 717, in save_reduce save(state) File ".../pickle.py", line 560, in save f(self, obj) # Call unbound method with explicit self File ".../pickle.py", line 887, in save_tuple save(element) File ".../pickle.py", line 560, in save f(self, obj) # Call unbound method with explicit self File ".../site-packages/dill/_dill.py", line 1251, in save_module_dict StockPickler.save_dict(pickler, obj) File ".../pickle.py", line 972, in save_dict self._batch_setitems(obj.items()) File ".../pickle.py", line 998, in _batch_setitems save(v) File ".../pickle.py", line 560, in save f(self, obj) # Call unbound method with explicit self File ".../site-packages/dill/_dill.py", line 1963, in save_function _save_with_postproc(pickler, (_create_function, ( File ".../site-packages/dill/_dill.py", line 1154, in _save_with_postproc pickler._batch_setitems(iter(source.items())) File ".../pickle.py", line 998, in _batch_setitems save(v) File ".../pickle.py", line 578, in save rv = reduce(self.proto) File ".../logging/__init__.py", line 1774, in __reduce__ raise pickle.PicklingError('logger cannot be pickled') _pickle.PicklingError: logger cannot be pickled ``` ## Steps to reproduce the bug Sorry I failed to have a minimal reproducible example, but the offending line on my end is ```python dataset.map( lambda examples: self.tokenize(examples), # this doesn't matter, lambda e: [1] * len(...) also breaks. In fact I'm pretty sure it breaks before executing this lambda batched=True, num_proc=4, ) ``` This does work when `num_proc=1`, so it's likely a multiprocessing thing. ## Expected results `map` succeeds ## Actual results The error trace above. ## Environment info - `datasets` version: 1.16.1 and 2.5.1 both failed - Platform: Ubuntu 20.04.4 LTS - Python version: 3.10.4 - PyArrow version: 9.0.0
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5061/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5061/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5060
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5060/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5060/comments
https://api.github.com/repos/huggingface/datasets/issues/5060/events
https://github.com/huggingface/datasets/issues/5060
1,395,382,940
I_kwDODunzps5TK9qc
5,060
Unable to Use Custom Dataset Locally
{ "avatar_url": "https://avatars.githubusercontent.com/u/33707069?v=4", "events_url": "https://api.github.com/users/zanussbaum/events{/privacy}", "followers_url": "https://api.github.com/users/zanussbaum/followers", "following_url": "https://api.github.com/users/zanussbaum/following{/other_user}", "gists_url": "https://api.github.com/users/zanussbaum/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/zanussbaum", "id": 33707069, "login": "zanussbaum", "node_id": "MDQ6VXNlcjMzNzA3MDY5", "organizations_url": "https://api.github.com/users/zanussbaum/orgs", "received_events_url": "https://api.github.com/users/zanussbaum/received_events", "repos_url": "https://api.github.com/users/zanussbaum/repos", "site_admin": false, "starred_url": "https://api.github.com/users/zanussbaum/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zanussbaum/subscriptions", "type": "User", "url": "https://api.github.com/users/zanussbaum" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
[ "Hi ! I opened a PR in your repo to fix this :)\r\nhttps://huggingface.co/datasets/zpn/pubchem_selfies/discussions/7\r\n\r\nbasically you need to use `open` for streaming to work properly", "Thank you so much for this! Naive question, is this a feature of `open` or have you all overloaded it to be able to read from a URL? Any links to code/documentation would be greatly appreciated, I'd love to learn more", "`datasets` extends `open` in dataset scripts to work with URLs. The builtin `open` from python only works with local files.\r\n\r\nYou can find the extension here: https://github.com/huggingface/datasets/blob/6ad430ba0cdeeb601170f732d4bd977f5c04594d/src/datasets/download/streaming_download_manager.py#L435-L451\r\n\r\nI think we can create a docs section dedicated to streaming to explain how this works", "Closing this one - feel free to reopen if you have more questions" ]
2022-10-03T21:55:16Z
2022-10-06T14:29:18Z
2022-10-06T14:29:17Z
NONE
null
null
null
## Describe the bug I have uploaded a [dataset](https://huggingface.co/datasets/zpn/pubchem_selfies) and followed the instructions from the [dataset_loader](https://huggingface.co/docs/datasets/dataset_script#download-data-files-and-organize-splits) tutorial. In that tutorial, it says ``` If the data files live in the same folder or repository of the dataset script, you can just pass the relative paths to the files instead of URLs. ``` Accordingly, I put the [relative path](https://huggingface.co/datasets/zpn/pubchem_selfies/blob/main/pubchem_selfies.py#L76) to the data to be used. I was able to test the dataset and generate the metadata locally with `datasets-cli test path/to/<your-dataset-loading-script> --save_infos --all_configs` However, if I try to load the data using `load_dataset`, I get the following error ``` with gzip.open(filepath, mode="rt") as f: File "/usr/local/Cellar/python@3.9/3.9.7_1/Frameworks/Python.framework/Versions/3.9/lib/python3.9/gzip.py", line 58, in open binary_file = GzipFile(filename, gz_mode, compresslevel) File "/usr/local/Cellar/python@3.9/3.9.7_1/Frameworks/Python.framework/Versions/3.9/lib/python3.9/gzip.py", line 173, in __init__ fileobj = self.myfileobj = builtins.open(filename, mode or 'rb') FileNotFoundError: [Errno 2] No such file or directory: 'https://huggingface.co/datasets/zpn/pubchem_selfies/resolve/main/data/Compound_021000001_021500000/Compound_021000001_021500000_SELFIES.jsonl.gz' ``` ## Steps to reproduce the bug ```python >>> from datasets import load_dataset >>> dataset = load_dataset("zpn/pubchem_selfies", streaming=True) >>> t = dataset["train"] >>> for item in t: ...... print(item) ...... break Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/zachnussbaum/env/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 723, in __iter__ for key, example in self._iter(): File "/Users/zachnussbaum/env/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 713, in _iter yield from ex_iterable File "/Users/zachnussbaum/env/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 113, in __iter__ yield from self.generate_examples_fn(**self.kwargs) File "/Users/zachnussbaum/.cache/huggingface/modules/datasets_modules/datasets/zpn--pubchem_selfies/d2571f35996765aea70fd3f3f8e3882d59c401fb738615c79282e2eb1d9f7a25/pubchem_selfies.py", line 475, in _generate_examples with gzip.open(filepath, mode="rt") as f: File "/usr/local/Cellar/python@3.9/3.9.7_1/Frameworks/Python.framework/Versions/3.9/lib/python3.9/gzip.py", line 58, in open binary_file = GzipFile(filename, gz_mode, compresslevel) File "/usr/local/Cellar/python@3.9/3.9.7_1/Frameworks/Python.framework/Versions/3.9/lib/python3.9/gzip.py", line 173, in __init__ fileobj = self.myfileobj = builtins.open(filename, mode or 'rb') FileNotFoundError: [Errno 2] No such file or directory: 'https://huggingface.co/datasets/zpn/pubchem_selfies/resolve/main/data/Compound_021000001_021500000/Compound_021000001_021500000_SELFIES.jsonl.gz' ```` ``` ## Expected results A clear and concise description of the expected results. ## Actual results Specify the actual results or traceback. ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.5.1 - Platform: macOS-12.5.1-x86_64-i386-64bit - Python version: 3.9.7 - PyArrow version: 9.0.0 - Pandas version: 1.5.0
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5060/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5060/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5059
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5059/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5059/comments
https://api.github.com/repos/huggingface/datasets/issues/5059/events
https://github.com/huggingface/datasets/pull/5059
1,395,050,876
PR_kwDODunzps5AEoX7
5,059
Fix typo
{ "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/stevhliu", "id": 59462357, "login": "stevhliu", "node_id": "MDQ6VXNlcjU5NDYyMzU3", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "repos_url": "https://api.github.com/users/stevhliu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "type": "User", "url": "https://api.github.com/users/stevhliu" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-03T17:05:25Z
2022-10-03T17:34:40Z
2022-10-03T17:32:27Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5059.diff", "html_url": "https://github.com/huggingface/datasets/pull/5059", "merged_at": "2022-10-03T17:32:27Z", "patch_url": "https://github.com/huggingface/datasets/pull/5059.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5059" }
Fixes a small typo :)
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5059/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5059/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5058
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5058/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5058/comments
https://api.github.com/repos/huggingface/datasets/issues/5058/events
https://github.com/huggingface/datasets/pull/5058
1,394,962,424
PR_kwDODunzps5AEVWn
5,058
Mark CI tests as xfail when 502 error
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-03T15:53:55Z
2022-10-04T10:03:23Z
2022-10-04T10:01:23Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5058.diff", "html_url": "https://github.com/huggingface/datasets/pull/5058", "merged_at": "2022-10-04T10:01:23Z", "patch_url": "https://github.com/huggingface/datasets/pull/5058.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5058" }
To make CI more robust, we could mark as xfail when the Hub raises a 502 error (besides 500 error): - FAILED tests/test_upstream_hub.py::TestPushToHub::test_push_dataset_to_hub_skip_identical_files - https://github.com/huggingface/datasets/actions/runs/3174626525/jobs/5171672431 ``` > raise HTTPError(http_error_msg, response=self) E requests.exceptions.HTTPError: 502 Server Error: Bad Gateway for url: https://hub-ci.huggingface.co/datasets/__DUMMY_TRANSFORMERS_USER__/test-16648055339047.git/info/lfs/objects/batch ``` - FAILED tests/test_upstream_hub.py::TestPushToHub::test_push_dataset_dict_to_hub_overwrite_files - https://github.com/huggingface/datasets/actions/runs/3145587033/jobs/5113074889 ``` > raise HTTPError(http_error_msg, response=self) E requests.exceptions.HTTPError: 502 Server Error: Bad Gateway for url: https://hub-ci.huggingface.co/datasets/__DUMMY_TRANSFORMERS_USER__/test-16643866807322.git/info/lfs/objects/verify ``` Currently, we mark as xfail when 500 error: - #4845
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5058/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5058/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5057
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5057/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5057/comments
https://api.github.com/repos/huggingface/datasets/issues/5057/events
https://github.com/huggingface/datasets/pull/5057
1,394,827,216
PR_kwDODunzps5AD4c6
5,057
Support `converters` in `CsvBuilder`
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-03T14:23:21Z
2022-10-04T11:19:28Z
2022-10-04T11:17:32Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5057.diff", "html_url": "https://github.com/huggingface/datasets/pull/5057", "merged_at": "2022-10-04T11:17:32Z", "patch_url": "https://github.com/huggingface/datasets/pull/5057.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5057" }
Add the `converters` param to `CsvBuilder`, to help in situations like [this one](https://discuss.huggingface.co/t/typeerror-in-load-dataset-related-to-a-sequence-of-strings/23545).
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5057/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5057/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5056
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5056/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5056/comments
https://api.github.com/repos/huggingface/datasets/issues/5056/events
https://github.com/huggingface/datasets/pull/5056
1,394,713,173
PR_kwDODunzps5ADfxN
5,056
Fix broken URL's (GEM)
{ "avatar_url": "https://avatars.githubusercontent.com/u/6687858?v=4", "events_url": "https://api.github.com/users/manandey/events{/privacy}", "followers_url": "https://api.github.com/users/manandey/followers", "following_url": "https://api.github.com/users/manandey/following{/other_user}", "gists_url": "https://api.github.com/users/manandey/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/manandey", "id": 6687858, "login": "manandey", "node_id": "MDQ6VXNlcjY2ODc4NTg=", "organizations_url": "https://api.github.com/users/manandey/orgs", "received_events_url": "https://api.github.com/users/manandey/received_events", "repos_url": "https://api.github.com/users/manandey/repos", "site_admin": false, "starred_url": "https://api.github.com/users/manandey/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/manandey/subscriptions", "type": "User", "url": "https://api.github.com/users/manandey" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_5056). All of your documentation changes will be reflected on that endpoint.", "Thanks, @manandey. We have removed all dataset scripts from this repo. Subsequent PRs should be opened directly on the Hugging Face Hub." ]
2022-10-03T13:13:22Z
2022-10-04T13:49:00Z
2022-10-04T13:48:59Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5056.diff", "html_url": "https://github.com/huggingface/datasets/pull/5056", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/5056.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5056" }
This PR fixes the broken URL's in GEM. cc. @lhoestq, @albertvillanova
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5056/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5056/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5055
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5055/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5055/comments
https://api.github.com/repos/huggingface/datasets/issues/5055/events
https://github.com/huggingface/datasets/pull/5055
1,394,503,844
PR_kwDODunzps5ACyVU
5,055
Fix backward compatibility for dataset_infos.json
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-03T10:30:14Z
2022-10-03T13:43:55Z
2022-10-03T13:41:32Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5055.diff", "html_url": "https://github.com/huggingface/datasets/pull/5055", "merged_at": "2022-10-03T13:41:32Z", "patch_url": "https://github.com/huggingface/datasets/pull/5055.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5055" }
While working on https://github.com/huggingface/datasets/pull/5018 I noticed a small bug introduced in #4926 regarding backward compatibility for dataset_infos.json Indeed, when a dataset repo had both dataset_infos.json and README.md, the JSON file was ignored. This is unexpected: in practice it should be ignored only if the README.md has a dataset_info field, which has precedence over the data in the JSON file.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5055/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5055/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5054
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5054/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5054/comments
https://api.github.com/repos/huggingface/datasets/issues/5054/events
https://github.com/huggingface/datasets/pull/5054
1,394,152,728
PR_kwDODunzps5ABnd3
5,054
Fix license/citation information of squadshifts dataset card
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "color": "0e8a16", "default": false, "description": "Contribution to a dataset script", "id": 4564477500, "name": "dataset contribution", "node_id": "LA_kwDODunzps8AAAABEBBmPA", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20contribution" } ]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-10-03T05:19:13Z
2022-10-03T09:26:49Z
2022-10-03T09:24:30Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5054.diff", "html_url": "https://github.com/huggingface/datasets/pull/5054", "merged_at": "2022-10-03T09:24:30Z", "patch_url": "https://github.com/huggingface/datasets/pull/5054.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5054" }
This PR fixes the license/citation information of squadshifts dataset card, once the dataset owners have responded to our request for information: - https://github.com/modestyachts/squadshifts-website/issues/1 Additionally, we have updated the mention in their website to our `datasets` library (they were referring old name `nlp`): - https://github.com/modestyachts/squadshifts-website/pull/2#event-7500953009
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5054/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5054/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5053
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5053/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5053/comments
https://api.github.com/repos/huggingface/datasets/issues/5053/events
https://github.com/huggingface/datasets/issues/5053
1,393,739,882
I_kwDODunzps5TEshq
5,053
Intermittent JSON parse error when streaming the Pile
{ "avatar_url": "https://avatars.githubusercontent.com/u/77788841?v=4", "events_url": "https://api.github.com/users/neelnanda-io/events{/privacy}", "followers_url": "https://api.github.com/users/neelnanda-io/followers", "following_url": "https://api.github.com/users/neelnanda-io/following{/other_user}", "gists_url": "https://api.github.com/users/neelnanda-io/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/neelnanda-io", "id": 77788841, "login": "neelnanda-io", "node_id": "MDQ6VXNlcjc3Nzg4ODQx", "organizations_url": "https://api.github.com/users/neelnanda-io/orgs", "received_events_url": "https://api.github.com/users/neelnanda-io/received_events", "repos_url": "https://api.github.com/users/neelnanda-io/repos", "site_admin": false, "starred_url": "https://api.github.com/users/neelnanda-io/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/neelnanda-io/subscriptions", "type": "User", "url": "https://api.github.com/users/neelnanda-io" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
null
[ "Maybe #2838 can help. In this PR we allow to skip bad chunks of JSON data to not crash the training\r\n\r\nDid you have warning messages before the error ?\r\n\r\nsomething like this maybe ?\r\n```\r\n03/24/2022 02:19:46 - WARNING - datasets.utils.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [1/20]\r\n03/24/2022 02:20:01 - WARNING - datasets.utils.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [2/20]\r\n03/24/2022 02:20:09 - ERROR - datasets.packaged_modules.json.json - Failed to read file 'gzip://file-000000000007.json::https://huggingface.co/datasets/lvwerra/codeparrot-clean-train/resolve/1d740acb9d09cf7a3307553323e2c677a6535407/file-000000000007.json.gz' with error <class 'pyarrow.lib.ArrowInvalid'>: JSON parse error: Invalid value. in row 0\r\n```", "Ah, thanks! I did get errors like that. Sad that PR wasn't merged in! \r\n\r\nI'm currently just downloading 200GB of the Pile locally to avoid streaming (I have space and it's faster anyway), but that's really useful! I can probably apply the dumb patch of just commenting out the bits that raise the JSON Parse Error lol, based on your code - if I continue the loop should it be fine?", "Yup you can get some inspiration from this PR. It simply ignores the bad chunks (a chunk is ~a few MBs of data).\r\nWe'll try to merge this PR soon" ]
2022-10-02T11:56:46Z
2022-10-04T17:59:03Z
null
NONE
null
null
null
## Describe the bug I have an intermittent error when streaming the Pile, where I get a JSON parse error which causes my program to crash. This is intermittent - when I rerun the program with the same random seed it does not crash in the same way. The exact point this happens also varied - it happened to me 11B tokens and 4 days into a training run, and now just happened 2 minutes into one, but I can't reliably reproduce it. I'm using a remote machine with 8 A6000 GPUs via runpod.io ## Expected results I have a DataLoader which can iterate through the whole Pile ## Actual results Stack trace: ``` Failed to read file 'zstd://12.jsonl::https://the-eye.eu/public/AI/pile/train/12.jsonl.zst' with error <class 'pyarrow.lib.ArrowInvalid'>: JSON parse error: Invalid value. in row 0 ``` I'm currently using HuggingFace accelerate, which also gave me the following stack trace, but I've also experienced this problem intermittently when using DataParallel, so I don't think it's to do with parallelisation ``` Traceback (most recent call last): File "ddp_script.py", line 1258, in <module> main() File "ddp_script.py", line 1143, in main for c, batch in tqdm.tqdm(enumerate(data_iter)): File "/opt/conda/lib/python3.7/site-packages/tqdm/std.py", line 1195, in __iter__ for obj in iterable: File "/opt/conda/lib/python3.7/site-packages/accelerate/data_loader.py", line 503, in __iter__ next_batch, next_batch_info, next_skip = self._fetch_batches(main_iterator) File "/opt/conda/lib/python3.7/site-packages/accelerate/data_loader.py", line 454, in _fetch_batches broadcast_object_list(batch_info) File "/opt/conda/lib/python3.7/site-packages/accelerate/utils/operations.py", line 333, in broadcast_object_list torch.distributed.broadcast_object_list(object_list, src=from_process) File "/opt/conda/lib/python3.7/site-packages/torch/distributed/distributed_c10d.py", line 1900, in broadcast_object_list object_list[i] = _tensor_to_object(obj_view, obj_size) File "/opt/conda/lib/python3.7/site-packages/torch/distributed/distributed_c10d.py", line 1571, in _tensor_to_object return _unpickler(io.BytesIO(buf)).load() _pickle.UnpicklingError: invalid load key, '@'. ``` ## Steps to reproduce the bug ```python from datasets import load_dataset dataset = load_dataset( cfg["dataset_name"], streaming=True, split="train") dataset = dataset.remove_columns("meta") dataset = dataset.map(tokenize_and_concatenate, batched=True) dataset = dataset.with_format(type="torch") train_data_loader = DataLoader( dataset, batch_size=cfg["batch_size"], num_workers=3) for batch in train_data_loader: continue ``` `tokenize_and_concatenate` is a custom tokenization function I defined on the GPT-NeoX tokenizer to tokenize the text, separated by endoftext tokens, and reshape to have length batch_size, I don't think this is related to tokenization: ``` import numpy as np import einops import torch def tokenize_and_concatenate(examples): texts = examples["text"] full_text = tokenizer.eos_token.join(texts) div = 20 length = len(full_text) // div text_list = [full_text[i * length: (i + 1) * length] for i in range(div)] tokens = tokenizer(text_list, return_tensors="np", padding=True)[ "input_ids" ].flatten() tokens = tokens[tokens != tokenizer.pad_token_id] n = len(tokens) curr_batch_size = n // (seq_len - 1) tokens = tokens[: (seq_len - 1) * curr_batch_size] tokens = einops.rearrange( tokens, "(batch_size seq) -> batch_size seq", batch_size=curr_batch_size, seq=seq_len - 1, ) prefix = np.ones((curr_batch_size, 1), dtype=np.int64) * \ tokenizer.bos_token_id return { "text": np.concatenate([prefix, tokens], axis=1) } ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.4.0 - Platform: Linux-5.4.0-105-generic-x86_64-with-debian-buster-sid - Python version: 3.7.13 - PyArrow version: 9.0.0 - Pandas version: 1.3.5 ZStandard data: Version: 0.18.0 Summary: Zstandard bindings for Python Home-page: https://github.com/indygreg/python-zstandard Author: Gregory Szorc Author-email: gregory.szorc@gmail.com License: BSD Location: /opt/conda/lib/python3.7/site-packages Requires: Required-by:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5053/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5053/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5052
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5052/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5052/comments
https://api.github.com/repos/huggingface/datasets/issues/5052/events
https://github.com/huggingface/datasets/pull/5052
1,393,076,765
PR_kwDODunzps4_-PZw
5,052
added from_generator method to IterableDataset class.
{ "avatar_url": "https://avatars.githubusercontent.com/u/56002455?v=4", "events_url": "https://api.github.com/users/hamid-vakilzadeh/events{/privacy}", "followers_url": "https://api.github.com/users/hamid-vakilzadeh/followers", "following_url": "https://api.github.com/users/hamid-vakilzadeh/following{/other_user}", "gists_url": "https://api.github.com/users/hamid-vakilzadeh/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/hamid-vakilzadeh", "id": 56002455, "login": "hamid-vakilzadeh", "node_id": "MDQ6VXNlcjU2MDAyNDU1", "organizations_url": "https://api.github.com/users/hamid-vakilzadeh/orgs", "received_events_url": "https://api.github.com/users/hamid-vakilzadeh/received_events", "repos_url": "https://api.github.com/users/hamid-vakilzadeh/repos", "site_admin": false, "starred_url": "https://api.github.com/users/hamid-vakilzadeh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hamid-vakilzadeh/subscriptions", "type": "User", "url": "https://api.github.com/users/hamid-vakilzadeh" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "I added a test and moved the `streaming` param from `read` to `__init_`. Then, I also decided to update the `read` method of the rest of the packaged modules to account for this param. \r\n\r\n@hamid-vakilzadeh Are you OK with these changes? ", "@mariosasko these all look great! Thanks for the updates." ]
2022-09-30T22:14:05Z
2022-10-05T12:51:48Z
2022-10-05T12:10:48Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5052.diff", "html_url": "https://github.com/huggingface/datasets/pull/5052", "merged_at": "2022-10-05T12:10:48Z", "patch_url": "https://github.com/huggingface/datasets/pull/5052.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5052" }
Hello, This resolves issues #4988. I added a method `from_generator` to class `IterableDataset`. I modified the `read` method of input stream generator to also return Iterable_dataset.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5052/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5052/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5051
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5051/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5051/comments
https://api.github.com/repos/huggingface/datasets/issues/5051/events
https://github.com/huggingface/datasets/pull/5051
1,392,559,503
PR_kwDODunzps4_8drw
5,051
Revert task removal in folder-based builders
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-09-30T14:50:03Z
2022-10-03T12:23:35Z
2022-10-03T12:21:31Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5051.diff", "html_url": "https://github.com/huggingface/datasets/pull/5051", "merged_at": "2022-10-03T12:21:31Z", "patch_url": "https://github.com/huggingface/datasets/pull/5051.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5051" }
Reverts the removal of `task_templates` in the folder-based builders. I also added the `AudioClassifaction` task for consistency. This is needed to fix https://github.com/huggingface/transformers/issues/19177. I think we should soon deprecate and remove the current task API (and investigate if it's possible to integrate the `train eval index` API), but we need to update the Transformers examples before that so we don't break them. cc @NielsRogge
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5051/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5051/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5050
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5050/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5050/comments
https://api.github.com/repos/huggingface/datasets/issues/5050/events
https://github.com/huggingface/datasets/issues/5050
1,392,381,882
I_kwDODunzps5S_g-6
5,050
Restore saved format state in `load_from_disk`
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" }, { "color": "7057ff", "default": true, "description": "Good for newcomers", "id": 1935892877, "name": "good first issue", "node_id": "MDU6TGFiZWwxOTM1ODkyODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/74454835?v=4", "events_url": "https://api.github.com/users/asofiaoliveira/events{/privacy}", "followers_url": "https://api.github.com/users/asofiaoliveira/followers", "following_url": "https://api.github.com/users/asofiaoliveira/following{/other_user}", "gists_url": "https://api.github.com/users/asofiaoliveira/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/asofiaoliveira", "id": 74454835, "login": "asofiaoliveira", "node_id": "MDQ6VXNlcjc0NDU0ODM1", "organizations_url": "https://api.github.com/users/asofiaoliveira/orgs", "received_events_url": "https://api.github.com/users/asofiaoliveira/received_events", "repos_url": "https://api.github.com/users/asofiaoliveira/repos", "site_admin": false, "starred_url": "https://api.github.com/users/asofiaoliveira/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/asofiaoliveira/subscriptions", "type": "User", "url": "https://api.github.com/users/asofiaoliveira" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/74454835?v=4", "events_url": "https://api.github.com/users/asofiaoliveira/events{/privacy}", "followers_url": "https://api.github.com/users/asofiaoliveira/followers", "following_url": "https://api.github.com/users/asofiaoliveira/following{/other_user}", "gists_url": "https://api.github.com/users/asofiaoliveira/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/asofiaoliveira", "id": 74454835, "login": "asofiaoliveira", "node_id": "MDQ6VXNlcjc0NDU0ODM1", "organizations_url": "https://api.github.com/users/asofiaoliveira/orgs", "received_events_url": "https://api.github.com/users/asofiaoliveira/received_events", "repos_url": "https://api.github.com/users/asofiaoliveira/repos", "site_admin": false, "starred_url": "https://api.github.com/users/asofiaoliveira/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/asofiaoliveira/subscriptions", "type": "User", "url": "https://api.github.com/users/asofiaoliveira" } ]
null
[ "Hi, can I work on this?", "Hi, sure! Let us know if you need some pointers/help." ]
2022-09-30T12:40:07Z
2022-10-11T16:49:24Z
2022-10-11T16:49:24Z
CONTRIBUTOR
null
null
null
Even though we save the `format` state in `save_to_disk`, we don't restore it in `load_from_disk`. We should fix that. Reported here: https://discuss.huggingface.co/t/save-to-disk-loses-formatting-information/23815
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5050/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5050/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5049
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5049/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5049/comments
https://api.github.com/repos/huggingface/datasets/issues/5049/events
https://github.com/huggingface/datasets/pull/5049
1,392,361,381
PR_kwDODunzps4_7zOY
5,049
Add `kwargs` to `Dataset.from_generator`
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-09-30T12:24:27Z
2022-10-03T11:00:11Z
2022-10-03T10:58:15Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5049.diff", "html_url": "https://github.com/huggingface/datasets/pull/5049", "merged_at": "2022-10-03T10:58:15Z", "patch_url": "https://github.com/huggingface/datasets/pull/5049.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5049" }
Add the `kwargs` param to `from_generator` to align it with the rest of the `from_` methods (this param allows passing custom `writer_batch_size` for instance).
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5049/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5049/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5048
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5048/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5048/comments
https://api.github.com/repos/huggingface/datasets/issues/5048/events
https://github.com/huggingface/datasets/pull/5048
1,392,170,680
PR_kwDODunzps4_7KI2
5,048
Fix bug with labels of eurlex config of lex_glue dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/1626984?v=4", "events_url": "https://api.github.com/users/iliaschalkidis/events{/privacy}", "followers_url": "https://api.github.com/users/iliaschalkidis/followers", "following_url": "https://api.github.com/users/iliaschalkidis/following{/other_user}", "gists_url": "https://api.github.com/users/iliaschalkidis/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/iliaschalkidis", "id": 1626984, "login": "iliaschalkidis", "node_id": "MDQ6VXNlcjE2MjY5ODQ=", "organizations_url": "https://api.github.com/users/iliaschalkidis/orgs", "received_events_url": "https://api.github.com/users/iliaschalkidis/received_events", "repos_url": "https://api.github.com/users/iliaschalkidis/repos", "site_admin": false, "starred_url": "https://api.github.com/users/iliaschalkidis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/iliaschalkidis/subscriptions", "type": "User", "url": "https://api.github.com/users/iliaschalkidis" }
[ { "color": "0e8a16", "default": false, "description": "Contribution to a dataset script", "id": 4564477500, "name": "dataset contribution", "node_id": "LA_kwDODunzps8AAAABEBBmPA", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20contribution" } ]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "@JamesLYC88 here is the fix! Thanks again!", "Thanks, @albertvillanova. When do you expect that this change will take effect when someone downloads the dataset?", "The change is immediately available now, since this change we made to our library:\r\n- #4059" ]
2022-09-30T09:47:12Z
2022-09-30T16:30:25Z
2022-09-30T16:21:41Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5048.diff", "html_url": "https://github.com/huggingface/datasets/pull/5048", "merged_at": "2022-09-30T16:21:41Z", "patch_url": "https://github.com/huggingface/datasets/pull/5048.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5048" }
Fix for a critical bug in the EURLEX dataset label list to make LexGLUE EURLEX results replicable. In LexGLUE (Chalkidis et al., 2022), the following is mentioned w.r.t. EUR-LEX: _"It supports four different label granularities, comprising 21, 127, 567, 7390 EuroVoc concepts, respectively. We use the 100 most frequent concepts from level 2 [...]”._ The current label list has all 127 labels, which leads to different (lower) results, as communicated by users. Thanks!
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5048/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5048/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5047
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5047/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5047/comments
https://api.github.com/repos/huggingface/datasets/issues/5047/events
https://github.com/huggingface/datasets/pull/5047
1,392,088,398
PR_kwDODunzps4_64bS
5,047
Fix cats_vs_dogs
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[ { "color": "0e8a16", "default": false, "description": "Contribution to a dataset script", "id": 4564477500, "name": "dataset contribution", "node_id": "LA_kwDODunzps8AAAABEBBmPA", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20contribution" } ]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-09-30T08:47:29Z
2022-09-30T10:23:22Z
2022-09-30T09:34:28Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5047.diff", "html_url": "https://github.com/huggingface/datasets/pull/5047", "merged_at": "2022-09-30T09:34:28Z", "patch_url": "https://github.com/huggingface/datasets/pull/5047.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5047" }
Reported in https://github.com/huggingface/datasets/pull/3878 I updated the number of examples
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5047/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5047/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5046
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5046/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5046/comments
https://api.github.com/repos/huggingface/datasets/issues/5046/events
https://github.com/huggingface/datasets/issues/5046
1,391,372,519
I_kwDODunzps5S7qjn
5,046
Audiofolder creates empty Dataset if files same level as metadata
{ "avatar_url": "https://avatars.githubusercontent.com/u/577139?v=4", "events_url": "https://api.github.com/users/msis/events{/privacy}", "followers_url": "https://api.github.com/users/msis/followers", "following_url": "https://api.github.com/users/msis/following{/other_user}", "gists_url": "https://api.github.com/users/msis/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/msis", "id": 577139, "login": "msis", "node_id": "MDQ6VXNlcjU3NzEzOQ==", "organizations_url": "https://api.github.com/users/msis/orgs", "received_events_url": "https://api.github.com/users/msis/received_events", "repos_url": "https://api.github.com/users/msis/repos", "site_admin": false, "starred_url": "https://api.github.com/users/msis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/msis/subscriptions", "type": "User", "url": "https://api.github.com/users/msis" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" }, { "color": "7057ff", "default": true, "description": "Good for newcomers", "id": 1935892877, "name": "good first issue", "node_id": "MDU6TGFiZWwxOTM1ODkyODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue" }, { "color": "DF8D62", "default": false, "description": "", "id": 4614514401, "name": "hacktoberfest", "node_id": "LA_kwDODunzps8AAAABEwvm4Q", "url": "https://api.github.com/repos/huggingface/datasets/labels/hacktoberfest" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/9295277?v=4", "events_url": "https://api.github.com/users/riccardobucco/events{/privacy}", "followers_url": "https://api.github.com/users/riccardobucco/followers", "following_url": "https://api.github.com/users/riccardobucco/following{/other_user}", "gists_url": "https://api.github.com/users/riccardobucco/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/riccardobucco", "id": 9295277, "login": "riccardobucco", "node_id": "MDQ6VXNlcjkyOTUyNzc=", "organizations_url": "https://api.github.com/users/riccardobucco/orgs", "received_events_url": "https://api.github.com/users/riccardobucco/received_events", "repos_url": "https://api.github.com/users/riccardobucco/repos", "site_admin": false, "starred_url": "https://api.github.com/users/riccardobucco/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/riccardobucco/subscriptions", "type": "User", "url": "https://api.github.com/users/riccardobucco" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/9295277?v=4", "events_url": "https://api.github.com/users/riccardobucco/events{/privacy}", "followers_url": "https://api.github.com/users/riccardobucco/followers", "following_url": "https://api.github.com/users/riccardobucco/following{/other_user}", "gists_url": "https://api.github.com/users/riccardobucco/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/riccardobucco", "id": 9295277, "login": "riccardobucco", "node_id": "MDQ6VXNlcjkyOTUyNzc=", "organizations_url": "https://api.github.com/users/riccardobucco/orgs", "received_events_url": "https://api.github.com/users/riccardobucco/received_events", "repos_url": "https://api.github.com/users/riccardobucco/repos", "site_admin": false, "starred_url": "https://api.github.com/users/riccardobucco/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/riccardobucco/subscriptions", "type": "User", "url": "https://api.github.com/users/riccardobucco" } ]
null
[ "Hi! Unfortunately, I can't reproduce this behavior. Instead, I get `ValueError: audio at 2063_fe9936e7-62b2-4e62-a276-acbd344480ce_1.wav doesn't have metadata in /audio-data/metadata.csv`, which can be fixed by removing the `./` from the file name.\r\n\r\n(Link to a Colab that tries to reproduce this behavior: https://colab.research.google.com/drive/1IhQzULYi0Van1xLrN_SddBX1JF7mLZZK?usp=sharing)", "I think we can make the file name matching part more robust by replacing `file_name` with `os.path.normpath(file_name)`, to ignore \"./\" among other things, in these two places:\r\n* https://github.com/huggingface/datasets/blob/85cd129bde605cd9acacdff0d065fc02e39e09b1/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py#L319\r\n* https://github.com/huggingface/datasets/blob/85cd129bde605cd9acacdff0d065fc02e39e09b1/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py#L388", "@mariosasko Some tests failed (see my PR). Any thoughts on that?", "Yes, I mentioned the solution in my review.", "I realized what I was doing wrong.\r\n\r\nThe documentation puts the files in a subfolder.\r\nOnce I have done that, it worked.\r\n\r\nBut l agree that this should be handled better if possible." ]
2022-09-29T19:17:23Z
2022-10-28T13:05:07Z
2022-10-28T13:05:07Z
NONE
null
null
null
## Describe the bug When audio files are at the same level as the metadata (`metadata.csv` or `metadata.jsonl` ), the `load_dataset` returns a `DatasetDict` with no rows but the correct columns. https://github.com/huggingface/datasets/blob/1ea4d091b7a4b83a85b2eeb8df65115d39af3766/docs/source/audio_dataset.mdx?plain=1#L88 ## Steps to reproduce the bug `metadata.csv`: ```csv file_name,duration,transcription ./2063_fe9936e7-62b2-4e62-a276-acbd344480ce_1.wav,10.768,hello ``` ```python >>> audio_dataset = load_dataset("audiofolder", data_dir="/audio-data/") >>> audio_dataset DatasetDict({ train: Dataset({ features: ['audio', 'duration', 'transcription'], num_rows: 0 }) validation: Dataset({ features: ['audio', 'duration', 'transcription'], num_rows: 0 }) }) ``` I've tried, with no success,: - setting `split` to something else so I don't get a `DatasetDict`, - removing the `./`, - using `.jsonl`. ## Expected results ``` Dataset({ features: ['audio', 'duration', 'transcription'], num_rows: 1 }) ``` ## Actual results ``` DatasetDict({ train: Dataset({ features: ['audio', 'duration', 'transcription'], num_rows: 0 }) validation: Dataset({ features: ['audio', 'duration', 'transcription'], num_rows: 0 }) }) ``` ## Environment info - `datasets` version: 2.5.1 - Platform: Linux-5.13.0-1025-aws-x86_64-with-glibc2.29 - Python version: 3.8.10 - PyArrow version: 9.0.0 - Pandas version: 1.5.0
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5046/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5046/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5045
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5045/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5045/comments
https://api.github.com/repos/huggingface/datasets/issues/5045/events
https://github.com/huggingface/datasets/issues/5045
1,391,287,609
I_kwDODunzps5S7V05
5,045
Automatically revert to last successful commit to hub when a push_to_hub is interrupted
{ "avatar_url": "https://avatars.githubusercontent.com/u/13120204?v=4", "events_url": "https://api.github.com/users/jorahn/events{/privacy}", "followers_url": "https://api.github.com/users/jorahn/followers", "following_url": "https://api.github.com/users/jorahn/following{/other_user}", "gists_url": "https://api.github.com/users/jorahn/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jorahn", "id": 13120204, "login": "jorahn", "node_id": "MDQ6VXNlcjEzMTIwMjA0", "organizations_url": "https://api.github.com/users/jorahn/orgs", "received_events_url": "https://api.github.com/users/jorahn/received_events", "repos_url": "https://api.github.com/users/jorahn/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jorahn/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jorahn/subscriptions", "type": "User", "url": "https://api.github.com/users/jorahn" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
[ "Could you share the error you got please ? Maybe the full stack trace if you have it ?\r\n\r\nMaybe `push_to_hub` be implemented as a single commit @Wauplin ? This way if it fails, the repo is still at the previous (valid) state instead of ending-up in an invalid/incimplete state.", "> Maybe push_to_hub be implemented as a single commit ? \r\n\r\nI think that would definitely be the way to go. Do you know the reasons why not implementing it like this in the first place ? I guess it is because of not been able to upload all at once with `huggingface_hub` but if there was another reason, please let me know.\r\nAbout pushing all at once, it seems to be a more and more requested feature. I have created this issue https://github.com/huggingface/huggingface_hub/issues/1085 recently but other discussions already happened in the past. The `moon-landing` team is working on it (cc @coyotte508). The `huggingface_hub` integration will come afterwards.\r\n\r\nFor now, maybe it's best to wait for a proper implementation instead of creating a temporary workaround :)\r\n", "> I think that would definitely be the way to go. Do you know the reasons why not implementing it like this in the first place ? I guess it is because of not been able to upload all at once with huggingface_hub but if there was another reason, please let me know.\r\n\r\nIdeally we would want to upload the files iteratively - and then once everything is uploaded we proceed to commit. When we implemented `push_to_hub`, using `upload_file` for each shard was the only option.\r\n\r\nFor more context: for each shard to upload we do:\r\n1. load the arrow shard in memory\r\n2. convert to parquet\r\n3. upload\r\n\r\nSo to avoid OOM we need to upload the files iteratively.\r\n\r\n> For now, maybe it's best to wait for a proper implementation instead of creating a temporary workaround :)\r\n\r\nLet us know if we can help !", "> Ideally we would want to upload the files iteratively - and then once everything is uploaded we proceed to commit. \r\n\r\nOh I see. So maybe this has to be done in an implementation specific to `datasets/` as it is not a very common case (upload a bunch of files on the fly).\r\n\r\nYou can maybe have a look at how `huggingface_hub` is implemented for LFS files (arrow shards are LFS anyway, right?).\r\nIn [`upload_lfs_files`](https://github.com/huggingface/huggingface_hub/blob/e28646c977fc9304a4c3576ce61ff07f9778950b/src/huggingface_hub/_commit_api.py#L164) LFS files are uploaded 1 by 1 (multithreaded) and then [the commit is pushed](https://github.com/huggingface/huggingface_hub/blob/e28646c977fc9304a4c3576ce61ff07f9778950b/src/huggingface_hub/hf_api.py#L1926) to the Hub once all files have been uploaded. This is pretty much what you need, right ?\r\n\r\nI can help you if you have questions how to do it in `datasets`. If that makes sense we could then move the implementation from `datasets` to `huggingface_hub` once it's mature. Next week I'm on holidays but feel free to start without my input.\r\n\r\n(also cc @coyotte508 and @SBrandeis who implemented LFS upload in `hfh`)", "> Could you share the error you got please ? Maybe the full stack trace if you have it ?\r\n\r\nHere’s part of the stack trace, that I can reproduce at the moment from a photo I took (potential typos from OCR):\r\n```\r\nValueError\r\nTraceback (most recent call last)\r\n<ipython-input-4-274613b7d3f5> in <module>\r\nfrom datasets import load dataset\r\nds = load_dataset('jrahn/chessv6', use_auth_token-True)\r\n\r\n/us/local/1ib/python3.7/dist-packages/datasets/table.py in cast_table _to_schema (table, schema)\r\nLine 2005 raise ValueError()\r\n\r\nValueError: Couldn't cast \r\nfen: string \r\nmove: string \r\nres: string \r\neco: string \r\nmove_id: int64\r\nres_num: int64 to\r\n{ 'fen': Value(dtype='string', id=None), \r\n'move': Value(dtype=' string', id=None),\r\n'res': Value(dtype='string', id=None),\r\n'eco': Value(dtype='string', id=None), \r\n'hc': Value(dtype='string', id=None), \r\n'move_ id': Value(dtype='int64', id=None),\r\n'res_num': Value(dtype= 'int64' , id=None) }\r\nbecause column names don't match \r\n```\r\n\r\nThe column 'hc' was removed before the interrupted push_to_hub(). It appears in the column list in curly brackets but not in the column list above.\r\n\r\nLet me know, if I can be of any help." ]
2022-09-29T18:08:12Z
2022-09-30T16:49:21Z
null
NONE
null
null
null
**Is your feature request related to a problem? Please describe.** I pushed a modification of a large dataset (remove a column) to the hub. The push was interrupted after some files were committed to the repo. This left the dataset to raise an error on load_dataset() (ValueError couldn’t cast … because column names don’t match). Only by specifying the previous (complete) commit as revision=commit_hash in load_data(), I was able to repair this and after a successful, complete push, the dataset loads without error again. **Describe the solution you'd like** Would it make sense to detect an incomplete push_to_hub() and automatically revert to the previous commit/revision? **Describe alternatives you've considered** Leave everything as is, the revision parameter in load_dataset() allows to manually fix this problem. **Additional context** Provide useful defaults
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/5045/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5045/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5044
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5044/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5044/comments
https://api.github.com/repos/huggingface/datasets/issues/5044/events
https://github.com/huggingface/datasets/issues/5044
1,391,242,908
I_kwDODunzps5S7K6c
5,044
integrate `load_from_disk` into `load_dataset`
{ "avatar_url": "https://avatars.githubusercontent.com/u/10676103?v=4", "events_url": "https://api.github.com/users/stas00/events{/privacy}", "followers_url": "https://api.github.com/users/stas00/followers", "following_url": "https://api.github.com/users/stas00/following{/other_user}", "gists_url": "https://api.github.com/users/stas00/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/stas00", "id": 10676103, "login": "stas00", "node_id": "MDQ6VXNlcjEwNjc2MTAz", "organizations_url": "https://api.github.com/users/stas00/orgs", "received_events_url": "https://api.github.com/users/stas00/received_events", "repos_url": "https://api.github.com/users/stas00/repos", "site_admin": false, "starred_url": "https://api.github.com/users/stas00/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stas00/subscriptions", "type": "User", "url": "https://api.github.com/users/stas00" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
[ "I agree the situation is not ideal and it would be awesome to use `load_dataset` to reload a dataset saved locally !\r\n\r\nFor context:\r\n\r\n- `load_dataset` works in three steps: download the dataset, then prepare it as an arrow dataset, and finally return a memory mapped arrow dataset. In particular it creates a cache directory to store the arrow data and the subsequent cache files for `map`.\r\n\r\n- `load_from_disk` directly returns a memory mapped dataset from the arrow file (similar to `Dataset.from_file`). It doesn't create a cache diretory, instead all the subsequent `map` calls write in the same directory as the original data. \r\n\r\nIf we want to keep the download_and_prepare step for consistency, it would unnecessarily copy the arrow data into the datasets cache. On the other hand if we don't do this step, the cache directory doesn't exist which is inconsistent.\r\n\r\nI'm curious, what would you expect to happen in this situation ?", "Thank you for the detailed breakdown, @lhoestq \r\n\r\n> I'm curious, what would you expect to happen in this situation ?\r\n\r\n1. the simplest solution is to add a flag to the dataset saved by `save_to_disk` and have `load_dataset` check that flag - if it's set simply switch control to `load_from_disk` behind the scenes. So `load_dataset` detects it's a local filesystem, looks inside to see whether it's something it can cache or whether it should use it directly as is and continues accordingly with one of the 2 dataset-type specific APIs.\r\n\r\n2. the more evolved solution is to look at a dataset produced by `save_to_disk` as a remote resource like hub. So the first time `load_dataset` sees it, it'll take a fingerprint and create a normal cached dataset. On subsequent uses it'll again discover it as a remote resource, validate that it has it cached via the fingerprint and serve as a normal dataset. \r\n\r\nAs you said the cons of approach 2 is that if the dataset is huge it'll make 2 copies on the same machine. So it's possible that both approaches can be integrated. Say if `save_to_disc(do_not_cache=True)` is passed it'll use solution 1, otherwise solution 2. or could even symlink the huge arrow files to the cache instead? or perhaps it's more intuitive to use `load_dataset(do_not_cache=True)` instead. So that one can choose whether to make a cached copy or not for the locally saved dataset. i.e. a simple at use point user control.\r\n\r\nSurely there are other ways to handle it, this is just one possibility.\r\n", "I think the simplest is to always memory map the local file without copy, but still have a cached directory in the cache at `~/.cache/huggingface` instead of saving `map` results next to the original data.\r\n\r\nIn practice we can even use symlinks if it makes the implementation simpler", "Yes, so that you always have the cached entry for any dataset, but the \"payload\" doesn't have to be physically in the cache if it's already on the local filesystem. As you said a symlink will do. " ]
2022-09-29T17:37:12Z
2022-09-30T16:59:19Z
null
MEMBER
null
null
null
**Is your feature request related to a problem? Please describe.** Is it possible to make `load_dataset` more universal similar to `from_pretrained` in `transformers` so that it can handle the hub, and the local path datasets of all supported types? Currently one has to choose a different loader depending on how the dataset has been created. e.g. this won't work: ``` $ git clone https://huggingface.co/datasets/severo/test-parquet $ python -c 'from datasets import load_dataset; ds=load_dataset("test-parquet"); \ ds.save_to_disk("my_dataset"); load_dataset("my_dataset")' [...] Traceback (most recent call last): File "<string>", line 1, in <module> File "/home/stas/anaconda3/envs/py38-pt112/lib/python3.8/site-packages/datasets/load.py", line 1746, in load_dataset builder_instance.download_and_prepare( File "/home/stas/anaconda3/envs/py38-pt112/lib/python3.8/site-packages/datasets/builder.py", line 704, in download_and_prepare self._download_and_prepare( File "/home/stas/anaconda3/envs/py38-pt112/lib/python3.8/site-packages/datasets/builder.py", line 793, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/home/stas/anaconda3/envs/py38-pt112/lib/python3.8/site-packages/datasets/builder.py", line 1277, in _prepare_split writer.write_table(table) File "/home/stas/anaconda3/envs/py38-pt112/lib/python3.8/site-packages/datasets/arrow_writer.py", line 524, in write_table pa_table = table_cast(pa_table, self._schema) File "/home/stas/anaconda3/envs/py38-pt112/lib/python3.8/site-packages/datasets/table.py", line 2005, in table_cast return cast_table_to_schema(table, schema) File "/home/stas/anaconda3/envs/py38-pt112/lib/python3.8/site-packages/datasets/table.py", line 1968, in cast_table_to_schema raise ValueError(f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match") ValueError: Couldn't cast _data_files: list<item: struct<filename: string>> child 0, item: struct<filename: string> child 0, filename: string ``` both times the dataset is being loaded from disk. Why does it fail the second time? Why can't `save_to_disk` generate a dataset that can be immediately loaded by `load_dataset`? e.g. the simplest hack would be to have `save_to_disk` add some flag to the saved dataset, that tells `load_dataset` to internally call `load_from_disk`. like having `save_to_disk` create a `load_me_with_load_from_disk.txt` file ;) and `load_dataset` will support that feature from saved datasets from new `datasets` versions. The old ones will still need to use `load_from_disk` explicitly. Unless the flag is not needed and one can immediately tell by looking at the saved dataset that it was saved via `save_to_disk` and thus use `load_from_disk` internally. The use-case is defining a simple API where the user only ever needs to pass a `dataset_name_or_path` and it will always just work. Currently one needs to manually add additional switches telling the system whether to use one loading method or the other which works but it's not smooth. Thank you!
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5044/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5044/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5043
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5043/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5043/comments
https://api.github.com/repos/huggingface/datasets/issues/5043/events
https://github.com/huggingface/datasets/pull/5043
1,391,141,773
PR_kwDODunzps4_3uzy
5,043
Fix `flatten_indices` with empty indices mapping
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-09-29T16:17:28Z
2022-09-30T15:46:39Z
2022-09-30T15:44:25Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5043.diff", "html_url": "https://github.com/huggingface/datasets/pull/5043", "merged_at": "2022-09-30T15:44:25Z", "patch_url": "https://github.com/huggingface/datasets/pull/5043.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5043" }
Fix #5038
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5043/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5043/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5042
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5042/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5042/comments
https://api.github.com/repos/huggingface/datasets/issues/5042/events
https://github.com/huggingface/datasets/pull/5042
1,390,762,877
PR_kwDODunzps4_2eqa
5,042
Update swiss judgment prediction
{ "avatar_url": "https://avatars.githubusercontent.com/u/3775944?v=4", "events_url": "https://api.github.com/users/JoelNiklaus/events{/privacy}", "followers_url": "https://api.github.com/users/JoelNiklaus/followers", "following_url": "https://api.github.com/users/JoelNiklaus/following{/other_user}", "gists_url": "https://api.github.com/users/JoelNiklaus/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/JoelNiklaus", "id": 3775944, "login": "JoelNiklaus", "node_id": "MDQ6VXNlcjM3NzU5NDQ=", "organizations_url": "https://api.github.com/users/JoelNiklaus/orgs", "received_events_url": "https://api.github.com/users/JoelNiklaus/received_events", "repos_url": "https://api.github.com/users/JoelNiklaus/repos", "site_admin": false, "starred_url": "https://api.github.com/users/JoelNiklaus/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JoelNiklaus/subscriptions", "type": "User", "url": "https://api.github.com/users/JoelNiklaus" }
[ { "color": "0e8a16", "default": false, "description": "Contribution to a dataset script", "id": 4564477500, "name": "dataset contribution", "node_id": "LA_kwDODunzps8AAAABEBBmPA", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20contribution" } ]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-09-29T12:10:02Z
2022-09-30T07:14:00Z
2022-09-29T14:32:02Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5042.diff", "html_url": "https://github.com/huggingface/datasets/pull/5042", "merged_at": "2022-09-29T14:32:02Z", "patch_url": "https://github.com/huggingface/datasets/pull/5042.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5042" }
I forgot to add the new citation.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5042/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5042/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5041
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5041/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5041/comments
https://api.github.com/repos/huggingface/datasets/issues/5041/events
https://github.com/huggingface/datasets/pull/5041
1,390,722,230
PR_kwDODunzps4_2WES
5,041
Support streaming hendrycks_test dataset.
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "color": "0e8a16", "default": false, "description": "Contribution to a dataset script", "id": 4564477500, "name": "dataset contribution", "node_id": "LA_kwDODunzps8AAAABEBBmPA", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20contribution" } ]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-09-29T11:37:58Z
2022-09-30T07:13:38Z
2022-09-29T12:07:29Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5041.diff", "html_url": "https://github.com/huggingface/datasets/pull/5041", "merged_at": "2022-09-29T12:07:29Z", "patch_url": "https://github.com/huggingface/datasets/pull/5041.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5041" }
This PR: - supports streaming - fixes the description section of the dataset card
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5041/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5041/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5040
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5040/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5040/comments
https://api.github.com/repos/huggingface/datasets/issues/5040/events
https://github.com/huggingface/datasets/pull/5040
1,390,566,428
PR_kwDODunzps4_11O2
5,040
Fix NonMatchingChecksumError in hendrycks_test dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "color": "0e8a16", "default": false, "description": "Contribution to a dataset script", "id": 4564477500, "name": "dataset contribution", "node_id": "LA_kwDODunzps8AAAABEBBmPA", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20contribution" } ]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-09-29T09:37:43Z
2022-09-29T10:06:22Z
2022-09-29T10:04:19Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5040.diff", "html_url": "https://github.com/huggingface/datasets/pull/5040", "merged_at": "2022-09-29T10:04:19Z", "patch_url": "https://github.com/huggingface/datasets/pull/5040.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5040" }
Update metadata JSON. Fix #5039.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5040/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5040/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5039
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5039/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5039/comments
https://api.github.com/repos/huggingface/datasets/issues/5039/events
https://github.com/huggingface/datasets/issues/5039
1,390,353,315
I_kwDODunzps5S3xuj
5,039
Hendrycks Checksum
{ "avatar_url": "https://avatars.githubusercontent.com/u/9974388?v=4", "events_url": "https://api.github.com/users/DanielHesslow/events{/privacy}", "followers_url": "https://api.github.com/users/DanielHesslow/followers", "following_url": "https://api.github.com/users/DanielHesslow/following{/other_user}", "gists_url": "https://api.github.com/users/DanielHesslow/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/DanielHesslow", "id": 9974388, "login": "DanielHesslow", "node_id": "MDQ6VXNlcjk5NzQzODg=", "organizations_url": "https://api.github.com/users/DanielHesslow/orgs", "received_events_url": "https://api.github.com/users/DanielHesslow/received_events", "repos_url": "https://api.github.com/users/DanielHesslow/repos", "site_admin": false, "starred_url": "https://api.github.com/users/DanielHesslow/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/DanielHesslow/subscriptions", "type": "User", "url": "https://api.github.com/users/DanielHesslow" }
[ { "color": "2edb81", "default": false, "description": "A bug in a dataset script provided in the library", "id": 2067388877, "name": "dataset bug", "node_id": "MDU6TGFiZWwyMDY3Mzg4ODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
[ "Thanks for reporting, @DanielHesslow. We are fixing it. ", "@albertvillanova thanks for taking care of this so quickly!", "The dataset metadata is fixed. You can download it normally." ]
2022-09-29T06:56:20Z
2022-09-29T10:23:30Z
2022-09-29T10:04:20Z
NONE
null
null
null
Hi, The checksum for [hendrycks_test](https://huggingface.co/datasets/hendrycks_test) does not compare correctly, I guess it has been updated on the remote. ``` datasets.utils.info_utils.NonMatchingChecksumError: Checksums didn't match for dataset source files: ['https://people.eecs.berkeley.edu/~hendrycks/data.tar'] ```
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5039/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5039/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5038
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5038/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5038/comments
https://api.github.com/repos/huggingface/datasets/issues/5038/events
https://github.com/huggingface/datasets/issues/5038
1,389,631,122
I_kwDODunzps5S1BaS
5,038
`Dataset.unique` showing wrong output after filtering
{ "avatar_url": "https://avatars.githubusercontent.com/u/4904985?v=4", "events_url": "https://api.github.com/users/mxschmdt/events{/privacy}", "followers_url": "https://api.github.com/users/mxschmdt/followers", "following_url": "https://api.github.com/users/mxschmdt/following{/other_user}", "gists_url": "https://api.github.com/users/mxschmdt/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mxschmdt", "id": 4904985, "login": "mxschmdt", "node_id": "MDQ6VXNlcjQ5MDQ5ODU=", "organizations_url": "https://api.github.com/users/mxschmdt/orgs", "received_events_url": "https://api.github.com/users/mxschmdt/received_events", "repos_url": "https://api.github.com/users/mxschmdt/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mxschmdt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxschmdt/subscriptions", "type": "User", "url": "https://api.github.com/users/mxschmdt" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
[ "Hi! It seems like `flatten_indices` (called in `unique`) doesn't know how to handle empty indices mappings. I'm working on the fix.", "Thanks, that was fast!" ]
2022-09-28T16:20:35Z
2022-09-30T15:44:25Z
2022-09-30T15:44:25Z
CONTRIBUTOR
null
null
null
## Describe the bug After filtering a dataset, and if no samples remain, `Dataset.unique` will return the unique values of the unfiltered dataset. ## Steps to reproduce the bug ```python from datasets import Dataset dataset = Dataset.from_dict({'id': [0]}) dataset = dataset.filter(lambda _: False) print(dataset.unique('id')) ``` ## Expected results The above code should return an empty list since the dataset is empty. ## Actual results ```bash [0] ``` ## Environment info - `datasets` version: 2.5.1 - Platform: Linux-5.18.19-100.fc35.x86_64-x86_64-with-glibc2.34 - Python version: 3.9.14 - PyArrow version: 7.0.0 - Pandas version: 1.3.5
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5038/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5038/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5037
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5037/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5037/comments
https://api.github.com/repos/huggingface/datasets/issues/5037/events
https://github.com/huggingface/datasets/pull/5037
1,389,244,722
PR_kwDODunzps4_xcp0
5,037
Improve CI performance speed of PackagedDatasetTest
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "There was a CI error which seemed unrelated: https://github.com/huggingface/datasets/actions/runs/3143581330/jobs/5111807056\r\n```\r\nFAILED tests/test_load.py::test_load_dataset_private_zipped_images[True] - FileNotFoundError: https://hub-ci.huggingface.co/datasets/__DUMMY_TRANSFORMERS_USER__/repo_zipped_img_data-16643808721979/resolve/75c3fc424a3b898a828b2b3fd84d96da4703228a/data.zip\r\n```\r\nIt disappeared after merging the main branch." ]
2022-09-28T12:08:16Z
2022-09-30T16:05:42Z
2022-09-30T16:03:24Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5037.diff", "html_url": "https://github.com/huggingface/datasets/pull/5037", "merged_at": "2022-09-30T16:03:24Z", "patch_url": "https://github.com/huggingface/datasets/pull/5037.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5037" }
This PR improves PackagedDatasetTest CI performance speed. For Ubuntu (latest): - Duration (without parallelism) before: 334.78s (5.58m) - Duration (without parallelism) afterwards: 0.48s The approach is passing a dummy `data_files` argument to load the builder, so that it avoids the slow inferring of it over the entire root directory of the repo. ## Total duration of PackagedDatasetTest | | Before | Afterwards | Improvement |---|---:|---:|---:| | Linux | 334.78s | 0.48s | x700 | Windows | 513.02s | 1.09s | x500 ## Durations by each individual sub-test More accurate durations, running them on GitHub, for Linux (latest). Before this PR, the total test time (without parallelism) for `tests/test_dataset_common.py::PackagedDatasetTest` is 334.78s (5.58m) ``` 39.07s call tests/test_dataset_common.py::PackagedDatasetTest::test_load_dataset_offline_imagefolder 38.94s call tests/test_dataset_common.py::PackagedDatasetTest::test_load_dataset_offline_audiofolder 34.18s call tests/test_dataset_common.py::PackagedDatasetTest::test_load_dataset_offline_parquet 34.12s call tests/test_dataset_common.py::PackagedDatasetTest::test_load_dataset_offline_csv 34.00s call tests/test_dataset_common.py::PackagedDatasetTest::test_load_dataset_offline_pandas 34.00s call tests/test_dataset_common.py::PackagedDatasetTest::test_load_dataset_offline_text 33.86s call tests/test_dataset_common.py::PackagedDatasetTest::test_load_dataset_offline_json 10.39s call tests/test_dataset_common.py::PackagedDatasetTest::test_builder_class_audiofolder 6.50s call tests/test_dataset_common.py::PackagedDatasetTest::test_builder_configs_audiofolder 6.46s call tests/test_dataset_common.py::PackagedDatasetTest::test_builder_configs_imagefolder 6.40s call tests/test_dataset_common.py::PackagedDatasetTest::test_builder_class_imagefolder 5.77s call tests/test_dataset_common.py::PackagedDatasetTest::test_builder_class_csv 5.77s call tests/test_dataset_common.py::PackagedDatasetTest::test_builder_class_text 5.74s call tests/test_dataset_common.py::PackagedDatasetTest::test_builder_configs_parquet 5.69s call tests/test_dataset_common.py::PackagedDatasetTest::test_builder_class_json 5.68s call tests/test_dataset_common.py::PackagedDatasetTest::test_builder_configs_pandas 5.67s call tests/test_dataset_common.py::PackagedDatasetTest::test_builder_class_parquet 5.67s call tests/test_dataset_common.py::PackagedDatasetTest::test_builder_class_pandas 5.66s call tests/test_dataset_common.py::PackagedDatasetTest::test_builder_configs_json 5.66s call tests/test_dataset_common.py::PackagedDatasetTest::test_builder_configs_csv 5.55s call tests/test_dataset_common.py::PackagedDatasetTest::test_builder_configs_text (42 durations < 0.005s hidden.) ``` With this PR: 0.48s ``` 0.09s call tests/test_dataset_common.py::PackagedDatasetTest::test_load_dataset_offline_audiofolder 0.08s call tests/test_dataset_common.py::PackagedDatasetTest::test_load_dataset_offline_csv 0.08s call tests/test_dataset_common.py::PackagedDatasetTest::test_load_dataset_offline_imagefolder 0.06s call tests/test_dataset_common.py::PackagedDatasetTest::test_load_dataset_offline_json 0.05s call tests/test_dataset_common.py::PackagedDatasetTest::test_builder_class_audiofolder 0.05s call tests/test_dataset_common.py::PackagedDatasetTest::test_load_dataset_offline_parquet 0.04s call tests/test_dataset_common.py::PackagedDatasetTest::test_load_dataset_offline_pandas 0.03s call tests/test_dataset_common.py::PackagedDatasetTest::test_load_dataset_offline_text (55 durations < 0.005s hidden.) ```
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 1, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 1, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/5037/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5037/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5036
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5036/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5036/comments
https://api.github.com/repos/huggingface/datasets/issues/5036/events
https://github.com/huggingface/datasets/pull/5036
1,389,094,075
PR_kwDODunzps4_w8Bs
5,036
Add oversampling strategy iterable datasets interleave
{ "avatar_url": "https://avatars.githubusercontent.com/u/52246514?v=4", "events_url": "https://api.github.com/users/ylacombe/events{/privacy}", "followers_url": "https://api.github.com/users/ylacombe/followers", "following_url": "https://api.github.com/users/ylacombe/following{/other_user}", "gists_url": "https://api.github.com/users/ylacombe/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ylacombe", "id": 52246514, "login": "ylacombe", "node_id": "MDQ6VXNlcjUyMjQ2NTE0", "organizations_url": "https://api.github.com/users/ylacombe/orgs", "received_events_url": "https://api.github.com/users/ylacombe/received_events", "repos_url": "https://api.github.com/users/ylacombe/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ylacombe/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ylacombe/subscriptions", "type": "User", "url": "https://api.github.com/users/ylacombe" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-09-28T10:10:23Z
2022-09-30T12:30:48Z
2022-09-30T12:28:23Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5036.diff", "html_url": "https://github.com/huggingface/datasets/pull/5036", "merged_at": "2022-09-30T12:28:23Z", "patch_url": "https://github.com/huggingface/datasets/pull/5036.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5036" }
Hello everyone, Following the issue #4893 and the PR #4831, I propose here an oversampling strategy for a `IterableDataset` list. The `all_exhausted` strategy stops building the new dataset as soon as all samples in each dataset have been added at least once. It follows roughly the same logic behind #4831, namely: - if ``probabilities`` is `None` and the strategy is `all_exhausted`, it simply performs a round robin interleaving that stops when the longest dataset is out of samples. Here the new dataset length will be $maxLengthDataset*nbDataset$. - if ``probabilities`` is not `None` and the strategy is `all_exhausted`, it keeps trace of the datasets which were out of samples but continues to add them to the new dataset, and stops as soons as every dataset runs out of samples at least once. In order to be consistent and also to align with the `Dataset` behavior, please note that the behavior of the default strategy (`first_exhausted`) has been changed. Namely, it really stops when a dataset is out of samples whereas it used to stop when receiving the `StopIteration` error. To give an example of the last note, with the following snippet: ``` >>> from tests.test_iterable_dataset import * >>> d1 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [0, 1, 2]])), {})) >>> d2 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [10, 11, 12, 13]])), {})) >>> d3 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [20, 21, 22, 23, 24]])), {})) >>> dataset = interleave_datasets([d1, d2, d3]) >>> [x["a"] for x in dataset] ``` The result here will then be `[10, 0, 11, 1, 2]` instead of `[10, 0, 11, 1, 2, 20, 12, 13]`. I modified the behavior because I found it to be consistent with the under/oversampling approach and because it unified the undersampling and oversampling code, but I stay open to any suggestions.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5036/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5036/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5035
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5035/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5035/comments
https://api.github.com/repos/huggingface/datasets/issues/5035/events
https://github.com/huggingface/datasets/pull/5035
1,388,914,476
PR_kwDODunzps4_wVie
5,035
Fix typos in load docstrings and comments
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-09-28T08:05:07Z
2022-09-28T17:28:40Z
2022-09-28T17:26:15Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5035.diff", "html_url": "https://github.com/huggingface/datasets/pull/5035", "merged_at": "2022-09-28T17:26:14Z", "patch_url": "https://github.com/huggingface/datasets/pull/5035.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5035" }
Minor fix of typos in load docstrings and comments
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5035/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5035/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5034
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5034/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5034/comments
https://api.github.com/repos/huggingface/datasets/issues/5034/events
https://github.com/huggingface/datasets/pull/5034
1,388,855,136
PR_kwDODunzps4_wJCu
5,034
Update README.md of yahoo_answers_topics dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/6416600?v=4", "events_url": "https://api.github.com/users/borgr/events{/privacy}", "followers_url": "https://api.github.com/users/borgr/followers", "following_url": "https://api.github.com/users/borgr/following{/other_user}", "gists_url": "https://api.github.com/users/borgr/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/borgr", "id": 6416600, "login": "borgr", "node_id": "MDQ6VXNlcjY0MTY2MDA=", "organizations_url": "https://api.github.com/users/borgr/orgs", "received_events_url": "https://api.github.com/users/borgr/received_events", "repos_url": "https://api.github.com/users/borgr/repos", "site_admin": false, "starred_url": "https://api.github.com/users/borgr/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/borgr/subscriptions", "type": "User", "url": "https://api.github.com/users/borgr" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_5034). All of your documentation changes will be reflected on that endpoint.", "Thanks, @borgr. We have removed all dataset scripts from this repo. Subsequent PRs should be opened directly on the Hugging Face Hub.", "Do you mean to edit through \"edit dataset card\" button? because it just leads to a broken page...\r\nhttps://huggingface.co/datasets/yahoo_answers_topics\r\n![image](https://user-images.githubusercontent.com/6416600/193852796-009ba537-1e8f-4c8b-898a-8c4f817b86ee.png)\r\nhttps://github.com/huggingface/datasets/tree/main/datasets/yahoo_answers_topics", "Hi @borgr, good catch! I'm going to report the button leading to a broken link.\r\n\r\nIn the meantime, you can propose a PR to the `README.md` file using this link: https://huggingface.co/datasets/yahoo_answers_topics/blob/main/README.md" ]
2022-09-28T07:17:33Z
2022-10-06T15:56:05Z
2022-10-04T13:49:25Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5034.diff", "html_url": "https://github.com/huggingface/datasets/pull/5034", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/5034.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5034" }
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5034/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5034/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5033
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5033/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5033/comments
https://api.github.com/repos/huggingface/datasets/issues/5033/events
https://github.com/huggingface/datasets/pull/5033
1,388,842,236
PR_kwDODunzps4_wGSE
5,033
Remove redundant code from some dataset module factories
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-09-28T07:06:26Z
2022-09-28T16:57:51Z
2022-09-28T16:55:12Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5033.diff", "html_url": "https://github.com/huggingface/datasets/pull/5033", "merged_at": "2022-09-28T16:55:12Z", "patch_url": "https://github.com/huggingface/datasets/pull/5033.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5033" }
This PR removes some redundant code introduced by mistake after a refactoring in: - #4576
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5033/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5033/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5032
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5032/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5032/comments
https://api.github.com/repos/huggingface/datasets/issues/5032/events
https://github.com/huggingface/datasets/issues/5032
1,388,270,935
I_kwDODunzps5Sv1VX
5,032
new dataset type: single-label and multi-label video classification
{ "avatar_url": "https://avatars.githubusercontent.com/u/34196005?v=4", "events_url": "https://api.github.com/users/fcakyon/events{/privacy}", "followers_url": "https://api.github.com/users/fcakyon/followers", "following_url": "https://api.github.com/users/fcakyon/following{/other_user}", "gists_url": "https://api.github.com/users/fcakyon/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/fcakyon", "id": 34196005, "login": "fcakyon", "node_id": "MDQ6VXNlcjM0MTk2MDA1", "organizations_url": "https://api.github.com/users/fcakyon/orgs", "received_events_url": "https://api.github.com/users/fcakyon/received_events", "repos_url": "https://api.github.com/users/fcakyon/repos", "site_admin": false, "starred_url": "https://api.github.com/users/fcakyon/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/fcakyon/subscriptions", "type": "User", "url": "https://api.github.com/users/fcakyon" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
[ "Hi ! You can in the `features` folder how we implemented the audio and image feature types.\r\n\r\nWe can have something similar to videos. What we need to decide:\r\n- the video loading library to use\r\n- the output format when a user accesses a video type object\r\n- what parameters a `Video()` feature type needs\r\n\r\nalso cc @nateraw who also took a look at what we can do for video", "@lhoestq @nateraw is there any progress on adding video classification datasets? ", "Hi ! I think we just missing which lib we're going to use to decode the videos + which parameters must go in the `Video` type", "Hmm. `decord` could be nice but it's no longer maintained [it seems](https://github.com/dmlc/decord/issues/214). ", "pytorchvideo uses [pyav](https://github.com/PyAV-Org/PyAV) as the default decoder: https://github.com/facebookresearch/pytorchvideo/blob/c8d23d8b7e597586a9e2d18f6ed31ad8aa379a7a/pytorchvideo/data/labeled_video_dataset.py#L37\r\n\r\nAlso it would be great if `optionally` audio can also be decoded from the video as in pytorchvideo: https://github.com/facebookresearch/pytorchvideo/blob/c8d23d8b7e597586a9e2d18f6ed31ad8aa379a7a/pytorchvideo/data/labeled_video_dataset.py#L35\r\n\r\nHere are the other decoders supported in pytorchvideo: https://github.com/facebookresearch/pytorchvideo/blob/c8d23d8b7e597586a9e2d18f6ed31ad8aa379a7a/pytorchvideo/data/encoded_video.py#L17\r\n", "@sayakpaul I did do quite a bit of work on [this PR](https://github.com/huggingface/datasets/pull/4532) a while back to add a video feature. It's outdated, but uses my `encoded_video` [package](https://github.com/nateraw/encoded-video) under the hood, which is basically a wrapper around PyAV stolen from [pytorchvideo](https://github.com/facebookresearch/pytorchvideo/) that gets rid of the `torch` dependency. \r\n\r\nwould be really great to get something like this in...it's just a really tricky and time consuming feature to add. " ]
2022-09-27T19:40:11Z
2022-11-02T19:10:13Z
null
NONE
null
null
null
**Is your feature request related to a problem? Please describe.** In my research, I am dealing with multi-modal (audio+text+frame sequence) video classification. It would be great if the datasets library supported generating multi-modal batches from a video dataset. **Describe the solution you'd like** Assume I have video files having single/multiple labels. I want to train a single/multi-label video classification model. I want datasets to support generating multi-modal batches (audio+frame sequence) from video files. Audio waveform and frame sequence can be extracted from each video clip then I can use any audio, image and video model from transformers library to extract features which will be fed into my model. **Describe alternatives you've considered** Currently, I am using https://github.com/facebookresearch/pytorchvideo dataloaders. There seems to be not much alternative. **Additional context** I am wiling to open a PR but don't know where to start.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 1, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/5032/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5032/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5031
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5031/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5031/comments
https://api.github.com/repos/huggingface/datasets/issues/5031/events
https://github.com/huggingface/datasets/pull/5031
1,388,201,146
PR_kwDODunzps4_t82_
5,031
Support hfh 0.10 implicit auth
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "@lhoestq it is now released so you can move forward with it :) ", "I took your comments into account @Wauplin :)\r\nI also bumped the requirement to 0.2.0 because we're using `set_access_token`\r\n\r\ncc @albertvillanova WDYT ? I edited the CI job to also check for our minimum supported version of hfh at the same time as the minimum pyarrow version", "@lhoestq great, thanks ! :)" ]
2022-09-27T18:37:49Z
2022-09-30T09:18:24Z
2022-09-30T09:15:59Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5031.diff", "html_url": "https://github.com/huggingface/datasets/pull/5031", "merged_at": "2022-09-30T09:15:59Z", "patch_url": "https://github.com/huggingface/datasets/pull/5031.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5031" }
In huggingface-hub 0.10 the `token` parameter is deprecated for dataset_info and list_repo_files in favor of use_auth_token. Moreover if use_auth_token=None then the user's token is used implicitly. I took those two changes into account Close https://github.com/huggingface/datasets/issues/4990 TODO: - [x] fix tests We should wait hfh 0.10 to be relased first to make sure it works correctly before merging
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5031/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5031/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5030
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5030/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5030/comments
https://api.github.com/repos/huggingface/datasets/issues/5030/events
https://github.com/huggingface/datasets/pull/5030
1,388,061,340
PR_kwDODunzps4_tfBO
5,030
Fast dataset iter
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "I ran some benchmarks (focused on the data fetching part of `__iter__`) and it seems like the combination `table.to_reader(batch_size)` + `RecordBatch.slice` performs the best ([script](https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b) with the results). I think we can choose (implicit) `batch_size=10` in the final implementation to avoid having problems with fetching large examples." ]
2022-09-27T16:44:51Z
2022-09-29T15:50:44Z
2022-09-29T15:48:17Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5030.diff", "html_url": "https://github.com/huggingface/datasets/pull/5030", "merged_at": "2022-09-29T15:48:17Z", "patch_url": "https://github.com/huggingface/datasets/pull/5030.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5030" }
Use `pa.Table.to_reader` to make iteration over examples/batches faster in `Dataset.{__iter__, map}` TODO: * [x] benchmarking (the only benchmark for now - iterating over (single) examples of `bookcorpus` (75 mil examples) in Colab is approx. 2.3x faster) * [x] check if iterating over bigger chunks + slicing to fetch individual examples in `_iter` yields better performance
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5030/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5030/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5029
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5029/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5029/comments
https://api.github.com/repos/huggingface/datasets/issues/5029/events
https://github.com/huggingface/datasets/pull/5029
1,387,600,960
PR_kwDODunzps4_r8-j
5,029
Fix import in `ClassLabel` docstring example
{ "avatar_url": "https://avatars.githubusercontent.com/u/36760800?v=4", "events_url": "https://api.github.com/users/alvarobartt/events{/privacy}", "followers_url": "https://api.github.com/users/alvarobartt/followers", "following_url": "https://api.github.com/users/alvarobartt/following{/other_user}", "gists_url": "https://api.github.com/users/alvarobartt/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/alvarobartt", "id": 36760800, "login": "alvarobartt", "node_id": "MDQ6VXNlcjM2NzYwODAw", "organizations_url": "https://api.github.com/users/alvarobartt/orgs", "received_events_url": "https://api.github.com/users/alvarobartt/received_events", "repos_url": "https://api.github.com/users/alvarobartt/repos", "site_admin": false, "starred_url": "https://api.github.com/users/alvarobartt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alvarobartt/subscriptions", "type": "User", "url": "https://api.github.com/users/alvarobartt" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-09-27T11:35:29Z
2022-09-27T14:03:24Z
2022-09-27T12:27:50Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5029.diff", "html_url": "https://github.com/huggingface/datasets/pull/5029", "merged_at": "2022-09-27T12:27:50Z", "patch_url": "https://github.com/huggingface/datasets/pull/5029.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5029" }
This PR addresses a super-simple fix: adding a missing `import` to the `ClassLabel` docstring example, as it was formatted as `from datasets Features`, so it's been fixed to `from datasets import Features`.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5029/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5029/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5028
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5028/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5028/comments
https://api.github.com/repos/huggingface/datasets/issues/5028/events
https://github.com/huggingface/datasets/issues/5028
1,386,272,533
I_kwDODunzps5SoNcV
5,028
passing parameters to the method passed to Dataset.from_generator()
{ "avatar_url": "https://avatars.githubusercontent.com/u/64276129?v=4", "events_url": "https://api.github.com/users/Basir-mahmood/events{/privacy}", "followers_url": "https://api.github.com/users/Basir-mahmood/followers", "following_url": "https://api.github.com/users/Basir-mahmood/following{/other_user}", "gists_url": "https://api.github.com/users/Basir-mahmood/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Basir-mahmood", "id": 64276129, "login": "Basir-mahmood", "node_id": "MDQ6VXNlcjY0Mjc2MTI5", "organizations_url": "https://api.github.com/users/Basir-mahmood/orgs", "received_events_url": "https://api.github.com/users/Basir-mahmood/received_events", "repos_url": "https://api.github.com/users/Basir-mahmood/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Basir-mahmood/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Basir-mahmood/subscriptions", "type": "User", "url": "https://api.github.com/users/Basir-mahmood" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
null
[]
null
[ "Hi! Yes, you can either use the `gen_kwargs` param in `Dataset.from_generator` (`ds = Dataset.from_generator(gen, gen_kwargs={\"param1\": val})`) or wrap the generator function with `functools.partial`\r\n(`ds = Dataset.from_generator(functools.partial(gen, param1=\"val\"))`) to pass custom parameters to it.\r\n" ]
2022-09-26T15:20:06Z
2022-10-03T13:00:00Z
2022-10-03T13:00:00Z
NONE
null
null
null
Big thanks for providing dataset creation via a generator. I want to ask whether there is any way that parameters can be passed to the method Dataset.from_generator() method, like as follows. ``` from datasets import Dataset def gen(param1): for idx in len(custom_dataset): yield custom_dataset[idx] + param1 ds = Dataset.from_generator(gen(param1)) ```
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5028/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5028/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5027
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5027/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5027/comments
https://api.github.com/repos/huggingface/datasets/issues/5027/events
https://github.com/huggingface/datasets/pull/5027
1,386,153,072
PR_kwDODunzps4_nFUE
5,027
Fix typo in error message
{ "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "events_url": "https://api.github.com/users/severo/events{/privacy}", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/severo", "id": 1676121, "login": "severo", "node_id": "MDQ6VXNlcjE2NzYxMjE=", "organizations_url": "https://api.github.com/users/severo/orgs", "received_events_url": "https://api.github.com/users/severo/received_events", "repos_url": "https://api.github.com/users/severo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "type": "User", "url": "https://api.github.com/users/severo" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-09-26T14:10:09Z
2022-09-27T12:28:03Z
2022-09-27T12:26:02Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5027.diff", "html_url": "https://github.com/huggingface/datasets/pull/5027", "merged_at": "2022-09-27T12:26:02Z", "patch_url": "https://github.com/huggingface/datasets/pull/5027.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5027" }
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5027/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5027/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5026
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5026/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5026/comments
https://api.github.com/repos/huggingface/datasets/issues/5026/events
https://github.com/huggingface/datasets/pull/5026
1,386,071,154
PR_kwDODunzps4_mz1w
5,026
patch CI_HUB_TOKEN_PATH with Path instead of str
{ "avatar_url": "https://avatars.githubusercontent.com/u/11801849?v=4", "events_url": "https://api.github.com/users/Wauplin/events{/privacy}", "followers_url": "https://api.github.com/users/Wauplin/followers", "following_url": "https://api.github.com/users/Wauplin/following{/other_user}", "gists_url": "https://api.github.com/users/Wauplin/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Wauplin", "id": 11801849, "login": "Wauplin", "node_id": "MDQ6VXNlcjExODAxODQ5", "organizations_url": "https://api.github.com/users/Wauplin/orgs", "received_events_url": "https://api.github.com/users/Wauplin/received_events", "repos_url": "https://api.github.com/users/Wauplin/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Wauplin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Wauplin/subscriptions", "type": "User", "url": "https://api.github.com/users/Wauplin" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-09-26T13:19:01Z
2022-09-26T14:30:55Z
2022-09-26T14:28:45Z
CONTRIBUTOR
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5026.diff", "html_url": "https://github.com/huggingface/datasets/pull/5026", "merged_at": "2022-09-26T14:28:45Z", "patch_url": "https://github.com/huggingface/datasets/pull/5026.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5026" }
Should fix the tests for `huggingface_hub==0.10.0rc0` prerelease (see [failed CI](https://github.com/huggingface/datasets/actions/runs/3127805250/jobs/5074879144)). Related to [this thread](https://huggingface.slack.com/archives/C02V5EA0A95/p1664195165294559) (internal link). Note: this should be a backward compatible fix (e.g. works also with previous versions of `huggingface_hub`) I am not sure where to put the changes so feel free to cherry-pick the commit and close this one without merging. cc @lhoestq
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5026/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5026/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5025
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5025/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5025/comments
https://api.github.com/repos/huggingface/datasets/issues/5025/events
https://github.com/huggingface/datasets/issues/5025
1,386,011,239
I_kwDODunzps5SnNpn
5,025
Custom Json Dataset Throwing Error when batch is False
{ "avatar_url": "https://avatars.githubusercontent.com/u/21245519?v=4", "events_url": "https://api.github.com/users/jmandivarapu1/events{/privacy}", "followers_url": "https://api.github.com/users/jmandivarapu1/followers", "following_url": "https://api.github.com/users/jmandivarapu1/following{/other_user}", "gists_url": "https://api.github.com/users/jmandivarapu1/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jmandivarapu1", "id": 21245519, "login": "jmandivarapu1", "node_id": "MDQ6VXNlcjIxMjQ1NTE5", "organizations_url": "https://api.github.com/users/jmandivarapu1/orgs", "received_events_url": "https://api.github.com/users/jmandivarapu1/received_events", "repos_url": "https://api.github.com/users/jmandivarapu1/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jmandivarapu1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jmandivarapu1/subscriptions", "type": "User", "url": "https://api.github.com/users/jmandivarapu1" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
[ "Hi! Our processors are meant to be used in `batched` mode, so if `batched` is `False`, you need to drop the batch dimension (the error message warns you that the array has an extra dimension meaning it's 4D instead of 3D) to avoid the error:\r\n```python\r\ndef prepare_examples(examples):\r\n #Some preporcessing for each image and text as all my data saved in cloud\r\n #For this reason I couldn't set the batch to True. \r\n encoding = processor(img_as_tensor, words, boxes=boxes, word_labels=labels,\r\n truncation=True, padding=\"max_length\", return_tensors=\"np\")\r\n # drop extra dim\r\n for k in encoding.items():\r\n encoding[k]=encoding[k][0]\r\n return encoding\r\n```", "> Hi! Our processors are meant to be used in `batched` mode, so if `batched` is `False`, you need to drop the batch dimension (the error message warns you that the array has an extra dimension meaning it's 4D instead of 3D) to avoid the error:\r\n> \r\n> ```python\r\n> def prepare_examples(examples):\r\n> #Some preporcessing for each image and text as all my data saved in cloud\r\n> #For this reason I couldn't set the batch to True. \r\n> encoding = processor(img_as_tensor, words, boxes=boxes, word_labels=labels,\r\n> truncation=True, padding=\"max_length\", return_tensors=\"np\")\r\n> # drop extra dim\r\n> for k in encoding.items():\r\n> encoding[k]=encoding[k][0]\r\n> return encoding\r\n> ```\r\n\r\nThank you it did work\r\n\r\n```\r\nfor k,v in encoding.items():\r\n encoding[k]=encoding[k][0]\r\n```" ]
2022-09-26T12:38:39Z
2022-09-27T19:50:00Z
2022-09-27T19:50:00Z
NONE
null
null
null
## Describe the bug A clear and concise description of what the bug is. I tried to create my custom dataset using below code ``` from datasets import Features, Sequence, ClassLabel, Value, Array2D, Array3D from torchvision import transforms from transformers import AutoProcessor # we'll use the Auto API here - it will load LayoutLMv3Processor behind the scenes, # based on the checkpoint we provide from the hub from datasets import load_dataset def prepare_examples(examples): #Some preporcessing for each image and text as all my data saved in cloud #For this reason I couldn't set the batch to True. encoding = processor(img_as_tensor, words, boxes=boxes, word_labels=labels, truncation=True, padding="max_length") # encoding['pixel_values']=np.array(encoding['pixel_values']) return encoding dataset = load_dataset("json", data_files='issues.jsonl') processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) features = dataset["train"].features column_names = dataset["train"].column_names # we need to define custom features for `set_format` (used later on) to work properly features = Features({ 'pixel_values': Array3D(dtype="float32", shape=(3, 224, 224)), 'input_ids': Sequence(feature=Value(dtype='int64')), 'attention_mask': Sequence(Value(dtype='int64')), 'bbox': Array2D(dtype="int64", shape=(512, 4)), 'labels': Sequence(feature=Value(dtype='int64')), }) train_dataset = dataset["train"].map( prepare_examples, batched=False, remove_columns=column_names, features=features ) ``` It throws below error. ``` /opt/conda/lib/python3.7/site-packages/datasets/arrow_writer.py in __arrow_array__(self, type) 172 storage = to_pyarrow_listarray(data, pa_type) --> 173 return pa.ExtensionArray.from_storage(pa_type, storage) 174 /opt/conda/lib/python3.7/site-packages/pyarrow/array.pxi in pyarrow.lib.ExtensionArray.from_storage() TypeError: Incompatible storage type list<item: list<item: list<item: list<item: float>>>> for extension type extension<arrow.py_extension_type<Array3DExtensionType>> ``` ## Steps to reproduce the bug ```python # Sample code to reproduce the bug ``` rom datasets import Features, Sequence, ClassLabel, Value, Array2D, Array3D from torchvision import transforms from transformers import AutoProcessor # we'll use the Auto API here - it will load LayoutLMv3Processor behind the scenes, # based on the checkpoint we provide from the hub from datasets import load_dataset def prepare_examples(examples): #Some preporcessing for each image and text as all my data saved in cloud encoding = processor(img_as_tensor, words, boxes=boxes, word_labels=labels, truncation=True, padding="max_length") # encoding['pixel_values']=np.array(encoding['pixel_values']) return encoding dataset = load_dataset("json", data_files='issues.jsonl') processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) features = dataset["train"].features column_names = dataset["train"].column_names # we need to define custom features for `set_format` (used later on) to work properly features = Features({ 'pixel_values': Array3D(dtype="float32", shape=(3, 224, 224)), 'input_ids': Sequence(feature=Value(dtype='int64')), 'attention_mask': Sequence(Value(dtype='int64')), 'bbox': Array2D(dtype="int64", shape=(512, 4)), 'labels': Sequence(feature=Value(dtype='int64')), }) train_dataset = dataset["train"].map( prepare_examples, batched=False, remove_columns=column_names, features=features ) ## Expected results A clear and concise description of the expected results. Expected would be similar to all the otherdatasets with no error. ## Actual results Specify the actual results or traceback. ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: - Platform: Unix - Python version: 3.9 - PyArrow version: 9.0.0
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5025/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5025/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5024
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5024/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5024/comments
https://api.github.com/repos/huggingface/datasets/issues/5024/events
https://github.com/huggingface/datasets/pull/5024
1,385,947,624
PR_kwDODunzps4_mZ3J
5,024
Fix string features of xcsr dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "color": "0e8a16", "default": false, "description": "Contribution to a dataset script", "id": 4564477500, "name": "dataset contribution", "node_id": "LA_kwDODunzps8AAAABEBBmPA", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20contribution" } ]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
2022-09-26T11:55:36Z
2022-09-28T07:56:18Z
2022-09-28T07:54:19Z
MEMBER
null
false
{ "diff_url": "https://github.com/huggingface/datasets/pull/5024.diff", "html_url": "https://github.com/huggingface/datasets/pull/5024", "merged_at": "2022-09-28T07:54:19Z", "patch_url": "https://github.com/huggingface/datasets/pull/5024.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5024" }
This PR fixes string features of `xcsr` dataset to avoid character splitting. Fix #5023. CC: @yangxqiao, @yuchenlin
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5024/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5024/timeline
null
null
true